diff --git a/OWNERS.md b/OWNERS.md index 4adc81048c59..7b12415e2e47 100644 --- a/OWNERS.md +++ b/OWNERS.md @@ -33,7 +33,7 @@ routing PRs, questions, etc. to the right place. * Data plane, codecs, security, configuration. * Jose Nino ([junr03](https://github.com/junr03)) (jnino@lyft.com) * Outlier detection, HTTP routing, xDS, configuration/operational questions. -* Dhi Aurrahman ([dio](https://github.com/dio)) (dio@tetrate.io) +* Dhi Aurrahman ([dio](https://github.com/dio)) (dio@rockybars.com) * Lua, access logging, and general miscellany. * Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com) * Stats, abseil, scalability, and performance. diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index e282e6289a7e..f4e3386cfac9 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -584,9 +584,9 @@ message Cluster { } // [#not-implemented-hide:] - message PrefetchPolicy { + message PreconnectPolicy { // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting // will only be done if the upstream is healthy. // // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be @@ -595,46 +595,46 @@ message Cluster { // serve both the original and presumed follow-up stream. // // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections prefetched. + // active streams, there would be 100 connections in use, and 50 connections preconnected. 
// This might be a useful value for something like short lived single-use connections, // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue // in case of unexpected disconnects where the connection could not be reused. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight. This means in steady state if a connection is torn down, // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be - // prefetched. + // preconnected. // - // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can - // harm latency more than the prefetching helps. - google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. + google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; // Indicates how many many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). 
- // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // Unlike per_upstream_preconnect_ratio this preconnects across the upstream instances in a // cluster, doing best effort predictions of what upstream would be picked next and // pre-establishing a connection. // - // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this // cluster, one to the second on the assumption there will be a follow-up stream. // - // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // Preconnecting will be limited to one preconnect per configured upstream in the cluster. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for // connection establishment. // - // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, - // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each upstream. // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. 
- google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + google.protobuf.DoubleValue predictive_preconnect_ratio = 2 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } @@ -1029,8 +1029,8 @@ message Cluster { TrackClusterStats track_cluster_stats = 49; // [#not-implemented-hide:] - // Prefetch configuration for this cluster. - PrefetchPolicy prefetch_policy = 50; + // Preconnect configuration for this cluster. + PreconnectPolicy preconnect_policy = 50; // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate // connection pool for every downstream connection diff --git a/api/envoy/config/cluster/v3/outlier_detection.proto b/api/envoy/config/cluster/v3/outlier_detection.proto index c0b4d5732db5..9bb5633e6269 100644 --- a/api/envoy/config/cluster/v3/outlier_detection.proto +++ b/api/envoy/config/cluster/v3/outlier_detection.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 21] +// [#next-free-field: 22] message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.OutlierDetection"; @@ -34,7 +34,8 @@ message OutlierDetection { google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected. + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. // Defaults to 30000ms or 30s. google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; @@ -148,4 +149,9 @@ message OutlierDetection { // volume is lower than this setting, failure percentage-based ejection will not be performed for // this host. Defaults to 50. 
google.protobuf.UInt32Value failure_percentage_request_volume = 20; + + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. + // Defaults to 300000ms or 300s. + google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; } diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index be402e646ee9..8b30ab23f265 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -589,12 +589,12 @@ message Cluster { } // [#not-implemented-hide:] - message PrefetchPolicy { + message PreconnectPolicy { option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + "envoy.config.cluster.v3.Cluster.PreconnectPolicy"; // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting // will only be done if the upstream is healthy. // // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be @@ -603,46 +603,46 @@ message Cluster { // serve both the original and presumed follow-up stream. // // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections prefetched. + // active streams, there would be 100 connections in use, and 50 connections preconnected. // This might be a useful value for something like short lived single-use connections, // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP // or regular HTTP/1.1 with keep-alive. 
For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue // in case of unexpected disconnects where the connection could not be reused. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight. This means in steady state if a connection is torn down, // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be - // prefetched. + // preconnected. // - // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can - // harm latency more than the prefetching helps. - google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. + google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; // Indicates how many many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // Unlike per_upstream_preconnect_ratio this preconnects across the upstream instances in a // cluster, doing best effort predictions of what upstream would be picked next and // pre-establishing a connection. 
// - // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this // cluster, one to the second on the assumption there will be a follow-up stream. // - // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // Preconnecting will be limited to one preconnect per configured upstream in the cluster. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for // connection establishment. // - // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, - // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each upstream. // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. - google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + google.protobuf.DoubleValue predictive_preconnect_ratio = 2 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } @@ -969,8 +969,8 @@ message Cluster { TrackClusterStats track_cluster_stats = 49; // [#not-implemented-hide:] - // Prefetch configuration for this cluster. - PrefetchPolicy prefetch_policy = 50; + // Preconnect configuration for this cluster. 
+ PreconnectPolicy preconnect_policy = 50; // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate // connection pool for every downstream connection diff --git a/api/envoy/config/cluster/v4alpha/outlier_detection.proto b/api/envoy/config/cluster/v4alpha/outlier_detection.proto index 29a1e01270d9..9b2efeb53146 100644 --- a/api/envoy/config/cluster/v4alpha/outlier_detection.proto +++ b/api/envoy/config/cluster/v4alpha/outlier_detection.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 21] +// [#next-free-field: 22] message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.OutlierDetection"; @@ -34,7 +34,8 @@ message OutlierDetection { google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected. + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. // Defaults to 30000ms or 30s. google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; @@ -148,4 +149,9 @@ message OutlierDetection { // volume is lower than this setting, failure percentage-based ejection will not be performed for // this host. Defaults to 50. google.protobuf.UInt32Value failure_percentage_request_volume = 20; + + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. + // Defaults to 300000ms or 300s. 
+ google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; } diff --git a/api/envoy/config/core/v3/base.proto b/api/envoy/config/core/v3/base.proto index 5b5339ea5bc5..74a7d55a7374 100644 --- a/api/envoy/config/core/v3/base.proto +++ b/api/envoy/config/core/v3/base.proto @@ -331,10 +331,10 @@ message DataSource { string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + bytes inline_bytes = 2; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_len: 1}]; + string inline_string = 3; } } diff --git a/api/envoy/config/core/v4alpha/base.proto b/api/envoy/config/core/v4alpha/base.proto index 27b0b356b1a7..6a967b1ae5f2 100644 --- a/api/envoy/config/core/v4alpha/base.proto +++ b/api/envoy/config/core/v4alpha/base.proto @@ -329,10 +329,10 @@ message DataSource { string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + bytes inline_bytes = 2; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_len: 1}]; + string inline_string = 3; } } diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index c389c841e0ba..c2236a34d3c4 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -218,7 +218,11 @@ message FilterChain { // load balancers including the AWS ELB support this option. If the option is // absent or set to false, Envoy will use the physical peer address of the // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. 
+ google.protobuf.BoolValue use_proxy_proto = 4 [deprecated = true]; // [#not-implemented-hide:] filter chain metadata. core.v3.Metadata metadata = 5; diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto index e7fe84482475..021aadc928c3 100644 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -203,9 +203,9 @@ message FilterChain { google.protobuf.Duration rebuild_timeout = 1; } - reserved 2; + reserved 2, 4; - reserved "tls_context"; + reserved "tls_context", "use_proxy_proto"; // The criteria to use when matching a connection to this filter chain. FilterChainMatch filter_chain_match = 1; @@ -216,14 +216,6 @@ message FilterChain { // list is empty, the connection will close by default. repeated Filter filters = 3; - // Whether the listener should expect a PROXY protocol V1 header on new - // connections. If this option is enabled, the listener will assume that that - // remote address of the connection is the one specified in the header. Some - // load balancers including the AWS ELB support this option. If the option is - // absent or set to false, Envoy will use the physical peer address of the - // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; - // [#not-implemented-hide:] filter chain metadata. 
core.v4alpha.Metadata metadata = 5; diff --git a/bazel/PPROF.md b/bazel/PPROF.md index fa0a4f012555..74987b1986b4 100644 --- a/bazel/PPROF.md +++ b/bazel/PPROF.md @@ -1,4 +1,24 @@ -# CPU or memory consumption testing with `pprof` +# Table of Contents + + * [CPU or memory consumption testing with gperftools and pprof](#cpu-or-memory-consumption-testing-with-gperftools-and-pprof) + * [Collecting CPU or heap profile for a full execution of envoy](#collecting-cpu-or-heap-profile-for-a-full-execution-of-envoy) + * [Compiling a statically-linked Envoy](#compiling-a-statically-linked-envoy) + * [Collecting the profile](#collecting-the-profile) + * [Analyzing the profile](#analyzing-the-profile) + * [Collecting CPU or heap profile for the full execution of a test target](#collecting-cpu-or-heap-profile-for-the-full-execution-of-a-test-target) + * [Starting and stopping profile programmatically](#starting-and-stopping-profile-programmatically) + * [Add tcmalloc_dep dependency to envoy_cc_library rules](#add-tcmalloc_dep-dependency-to-envoy_cc_library-rules) + * [Memory Profiling in Tests](#memory-profiling-in-tests) + * [Enabling Memory Profiling in Tests](#enabling-memory-profiling-in-tests) + * [Bazel Configuration](#bazel-configuration) + * [Methodology](#methodology) + * [Analyzing with pprof](#analyzing-with-pprof) + * [Alternatives to gperftools](#alternatives-to-gperftools) + * [On-CPU analysis](#on-cpu-analysis) + * [Memory analysis](#memory-analysis) + * [Performance annotations](#performance-annotations) + +# CPU or memory consumption testing with `gperftools` and `pprof` To use `pprof` to analyze performance and memory consumption in Envoy, you can use the built-in statically linked profiler provided by @@ -155,3 +175,145 @@ More complex flame/graph charts can be generated and viewed in a browser, which is often more helpful than text-based output: $ pprof -http=localhost:9999 bazel-bin/source/exe/envoy main_common_base* + +# Alternatives to `gperftools` + +## 
On-CPU analysis + +By default Envoy is built without gperftools. In this case the same results can be +achieved for On-CPU analysis with the `perf` tool. For this there is no need to tweak +Envoy's environment, you can even do measurements for an instance running in production +(beware of possible performance hit though). Simply run: +``` +$ perf record -g -F 99 -p `pgrep envoy` +^C[ perf record: Woken up 1 times to write data ] +[ perf record: Captured and wrote 0.694 MB perf.data (1532 samples) ] +``` + +The program will store the collected sampling data in the file `perf.data` whose +format is also understood by recent enough versions of `pprof`: +``` +$ pprof -http=localhost:9999 perf.data +``` +## Memory analysis + +Unfortunately `perf` doesn't support heap profiling analogous to `gperftools`, but still +we can get some insight into memory allocations with +[Brendan Gregg's tools](http://www.brendangregg.com/FlameGraphs/memoryflamegraphs.html). +You'll need to have [bcc](https://github.com/iovisor/bcc) installed in your system and a +copy of [FlameGraph](https://github.com/brendangregg/FlameGraph): +``` +$ git clone https://github.com/brendangregg/FlameGraph +$ sudo /usr/share/bcc/tools/stackcount -p `pgrep envoy` \ + -U "/full/path/to/envoy/bazel-bin/source/exe/envoy-static:_Znwm" > out.stacks +$ ./FlameGraph/stackcollapse.pl < out.stacks | ./FlameGraph/flamegraph.pl --color=mem \ + --title="operator new(std::size_t) Flame Graph" --countname="calls" > out.svg +``` + +The `stackcount` utility counts function calls and their stack traces using eBPF probes. +Since Envoy by default links statically to tcmalloc which provides its own implementation +of memory management functions the used uprobe looks like +```/full/path/to/envoy/bazel-bin/source/exe/envoy-static:_Znwm```. The part before +the colon is a library name (a path to Envoy's binary in our case). 
The part after the +colon is a function name as it looks like in the output of `objdump -tT /path/to/lib`, +that is mangled in our case. To get an idea how your compiler mangles the name you +can use this one-liner: +``` +$ echo -e "#include <new>\n void* operator new(std::size_t) {} " | g++ -x c++ -S - -o- 2> /dev/null + .file "" + .text + .globl _Znwm + .type _Znwm, @function +_Znwm: +.LFB73: + .cfi_startproc + pushq %rbp + .cfi_def_cfa_offset 16 + .cfi_offset 6, -16 + movq %rsp, %rbp + .cfi_def_cfa_register 6 + movq %rdi, -8(%rbp) + nop + popq %rbp + .cfi_def_cfa 7, 8 + ret + .cfi_endproc +.LFE73: + .size _Znwm, .-_Znwm + .ident "GCC: (GNU) 10.2.1 20201016 (Red Hat 10.2.1-6)" + .section .note.GNU-stack,"",@progbits +``` + +WARNING: The name is going to be different on 32-bit and 64-bit platforms due to different sizes +of `size_t`. Also ```void* operator new[](std::size_t)``` is a separate function as well as `malloc()`. +The latter is a C function and hence not mangled by the way. + +`stackcount` doesn't count how much memory is allocated, but how often. To answer the "how much" +question you could use Brendan's +[mallocstacks](https://github.com/brendangregg/BPF-tools/blob/master/old/2017-12-23/mallocstacks.py) +tool, but it works only for `malloc()` calls. You need to modify it to take into +account other memory allocating functions. + +# Performance annotations + +In case there is a need to measure how long a code path takes time to execute in Envoy you may +resort to instrumenting the code with the +[performance annotations](https://github.com/envoyproxy/envoy/blob/master/source/common/common/perf_annotation.h). + +There are two types of the annotations. The first one is used to measure operations limited by +a common lexical scope. 
For example: + +```c++ +void doHeavyLifting() { + PERF_OPERATION(op); + bool success = doSomething(); + if (success) { + finalizeSuccessfulOperation(); + PERF_RECORD(op, "successful", "heavy lifting"); + } else { + recoverFromFailure(); + PERF_RECORD(op, "failed", "heavy lifting") + } +} +``` + +The recorded performance data can be dumped to stdout with a call to `PERF_DUMP()`: +``` +Duration(us) # Calls Mean(ns) StdDev(ns) Min(ns) Max(ns) Category Description + 2837 22 128965 37035.5 109731 241957 successful heavy lifting + 204 13 15745 2449.4 13323 20446 failed heavy lifting +``` + +The second type is performance annotations owned by a class instance. They can measure +operations spanned across the instance's methods: +```c++ +class CustomFilter : public Http::StreamEncoderFilter { +public: + + ... + // Http::StreamEncoderFilter + Http::FilterHeadersStatus encodeHeaders(Http::ResponseHeaderMap& headers, + bool end_stream) override { + PERF_OWNED_OPERATION(perf_operation_); + return Http::FilterHeadersStatus::Continue; + } + + Http::FilterDataStatus encodeData(Buffer::Instance& buffer, bool end_stream) override { + if (end_stream) { + PERF_OWNED_RECORD(perf_operation_, "without trailers", "stream encoding") + } + return Http::FilterDataStatus::Continue; + } + + Http::FilterTrailersStatus encodeTrailers(Http::ResponseTrailerMap&) override { + PERF_OWNED_RECORD(perf_operation_, "with trailers", "stream encoding"); + return Http::FilterTrailersStatus::Continue; + } + + ... + +private: + ... 
+ PERF_OWNER(perf_operation_); +}; +``` diff --git a/bazel/cel-cpp.patch b/bazel/cel-cpp.patch deleted file mode 100644 index aee357153fdf..000000000000 --- a/bazel/cel-cpp.patch +++ /dev/null @@ -1,33 +0,0 @@ -diff --git a/eval/public/containers/field_backed_map_impl.cc b/eval/public/containers/field_backed_map_impl.cc -index cd56f51..4d2a546 100644 ---- a/eval/public/containers/field_backed_map_impl.cc -+++ b/eval/public/containers/field_backed_map_impl.cc -@@ -117,7 +117,9 @@ int FieldBackedMapImpl::size() const { - const CelList* FieldBackedMapImpl::ListKeys() const { return key_list_.get(); } - - absl::optional FieldBackedMapImpl::operator[](CelValue key) const { --#ifdef GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND -+#ifdef XXX_GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND -+ static_assert(false); -+ - // Fast implementation. - google::protobuf::MapKey inner_key; - switch (key.type()) { -@@ -171,7 +173,7 @@ absl::optional FieldBackedMapImpl::operator[](CelValue key) const { - return CreateErrorValue(arena_, status.message()); - } - return result; --#else // GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND -+#else // XXX_GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND - // Slow implementation. 
- CelValue result = CelValue::CreateNull(); - CelValue inner_key = CelValue::CreateNull(); -@@ -228,7 +230,7 @@ absl::optional FieldBackedMapImpl::operator[](CelValue key) const { - } - - return {}; --#endif // GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND -+#endif // XXX_GOOGLE_PROTOBUF_HAS_CEL_MAP_REFLECTION_FRIEND - } - - } // namespace runtime diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index 089a0e5c8c72..ac6c3886d8db 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -209,10 +209,6 @@ def envoy_cc_test_library( copts = [], alwayslink = 1, **kargs): - deps = deps + [ - repository + "//test/test_common:printers_includes", - ] - _envoy_cc_test_infrastructure_library( name, srcs, diff --git a/bazel/foreign_cc/curl.patch b/bazel/foreign_cc/curl.patch index 7c2a7bc129e0..e602ba03dc44 100644 --- a/bazel/foreign_cc/curl.patch +++ b/bazel/foreign_cc/curl.patch @@ -18,12 +18,3 @@ index ec1cfa782..0c5a72f00 100644 + string(REGEX REPLACE "/MD" "/MT" ${flags_var} "${${flags_var}}") + endif() + endforeach() -diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt -index 911c9096d..ba6af1bf1 100644 ---- a/lib/CMakeLists.txt -+++ b/lib/CMakeLists.txt -@@ -91,4 +91,0 @@ add_library( --if(MSVC AND NOT BUILD_SHARED_LIBS) -- set_target_properties(${LIB_NAME} PROPERTIES STATIC_LIBRARY_FLAGS ${CMAKE_EXE_LINKER_FLAGS}) --endif() -- diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index db873d30c16f..7020233f98ee 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -351,15 +351,7 @@ def _com_github_zlib_ng_zlib_ng(): ) def _com_google_cel_cpp(): - external_http_archive( - "com_google_cel_cpp", - patch_args = ["-p1"], - # Patches to remove "fast" protobuf-internal access - # The patch can be removed when the "fast" access is safe to be enabled back. - # This requires public visibility of Reflection::LookupMapValue in protobuf and - # any release of cel-cpp after 10/27/2020. 
- patches = ["@envoy//bazel:cel-cpp.patch"], - ) + external_http_archive("com_google_cel_cpp") external_http_archive("rules_antlr") # Parser dependencies diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index dbfc5221d054..7d27b96c144b 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -1,6 +1,6 @@ load("@rules_python//python:repositories.bzl", "py_repositories") load("@rules_python//python:pip.bzl", "pip3_import", "pip_repositories") -load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_raze__fetch_remote_crates") +load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_remote_crates") # Python dependencies. def _python_deps(): @@ -101,4 +101,4 @@ def _python_deps(): # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). def envoy_dependencies_extra(): _python_deps() - proxy_wasm_cpp_host_raze__fetch_remote_crates() + proxy_wasm_cpp_host_fetch_remote_crates() diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 458c69f1f82b..8d550a63cc2e 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -649,8 +649,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "curl", project_desc = "Library for transferring data with URLs", project_url = "https://curl.haxx.se", - version = "7.72.0", - sha256 = "d4d5899a3868fbb6ae1856c3e55a32ce35913de3956d1973caccd37bd0174fa2", + version = "7.74.0", + sha256 = "e56b3921eeb7a2951959c02db0912b5fcd5fdba5aca071da819e1accf338bbd7", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], use_category = ["dataplane_ext", "observability_ext"], @@ -660,7 +660,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], - release_date = "2020-08-19", + release_date = "2020-12-09", cpe = "cpe:2.3:a:haxx:curl:*", ), 
com_googlesource_chromium_v8 = dict( @@ -707,8 +707,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Common Expression Language (CEL) C++ library", project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", - version = "47244a458e7739ad38e178a3f3892d197de4a574", - sha256 = "51b1af23cb703a94d18fe7a5e2696f96cde5bc35a279c6c844e6363aea3982fb", + version = "9841e3ee251f3cc4cd5b6dd9deee6818bc9f2854", + sha256 = "7e42cbad7d1068d6e7891ad101e2863e727692136d6b3a817c487b3cc7bcfdcc", strip_prefix = "cel-cpp-{version}", urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -721,7 +721,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], - release_date = "2020-10-25", + release_date = "2020-12-17", cpe = "N/A", ), com_github_google_flatbuffers = dict( @@ -844,8 +844,8 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", - version = "15827110ac35fdac9abdb6b05d04ee7ee2044dae", - sha256 = "77a2671205eb0973bee375a1bee4099edef991350433981f6e3508780318117d", + version = "6dab125d7a668c7158848b6f48c67fd827c952e6", + sha256 = "b5c73ed053a7079bd8bf53b14c4811e87ae521d9fcf4769ec5b248202a27600d", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], @@ -860,7 +860,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( "envoy.wasm.runtime.wavm", "envoy.wasm.runtime.wasmtime", ], - release_date = "2020-11-12", + release_date = "2020-12-16", cpe = "N/A", ), proxy_wasm_rust_sdk = dict( diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh index ef29e6c92587..88f8c406dd65 100755 --- a/ci/mac_ci_setup.sh +++ b/ci/mac_ci_setup.sh @@ -6,8 +6,10 @@ # 
https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. -# https://github.com/actions/virtual-environments/issues/1811 -brew uninstall openssl@1.0.2t +# https://github.com/actions/virtual-environments/issues/2322 +if command -v 2to3 > /dev/null; then + rm -f "$(command -v 2to3)" +fi export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_RETRY_ATTEMPTS=10 diff --git a/configs/envoy_double_proxy.template.yaml b/configs/envoy_double_proxy.template.yaml index 1dcaaf84765a..e5a7ab23f063 100644 --- a/configs/envoy_double_proxy.template.yaml +++ b/configs/envoy_double_proxy.template.yaml @@ -5,6 +5,12 @@ protocol: {{protocol}} address: {{address}} port_value: {{port_value}} + {% if proxy_proto %} + listener_filters: + - name: envoy.filters.listener.proxy_protocol + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol + {% endif %} filter_chains: - filter_chain_match: {} {% if tls %} @@ -23,9 +29,6 @@ - h2 - http/1.1 {% endif %} - {% if proxy_proto %} - use_proxy_proto: true - {%endif -%} filters: - name: envoy.filters.network.http_connection_manager typed_config: diff --git a/configs/envoy_front_proxy.template.yaml b/configs/envoy_front_proxy.template.yaml index e60670f112c1..a2d3dea51ade 100644 --- a/configs/envoy_front_proxy.template.yaml +++ b/configs/envoy_front_proxy.template.yaml @@ -7,6 +7,12 @@ protocol: {{protocol}} address: {{address}} port_value: {{port_value}} + {% if proxy_proto %} + listener_filters: + - name: envoy.filters.listener.proxy_protocol + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol + {% endif %} filter_chains: {% if tls %} - transport_socket: @@ -28,9 +34,6 @@ #double proxy configuration. 
verify_certificate_hash: "0000000000000000000000000000000000000000000000000000000000000000" {% endif %} - {%if proxy_proto%} - use_proxy_proto: true - {%endif%} {%endif %} filters: - name: envoy.filters.network.http_connection_manager diff --git a/configs/google-vrp/envoy-edge.yaml b/configs/google-vrp/envoy-edge.yaml index 7faa6caf2d2f..fc95700f115c 100644 --- a/configs/google-vrp/envoy-edge.yaml +++ b/configs/google-vrp/envoy-edge.yaml @@ -27,6 +27,11 @@ static_resources: address: 0.0.0.0 port_value: 10000 per_connection_buffer_limit_bytes: 32768 # 32 KiB + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. + # listener_filters: + # - name: envoy.filters.listener.proxy_protocol + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol filter_chains: - transport_socket: name: envoy.transport_sockets.tls @@ -36,8 +41,6 @@ static_resources: tls_certificates: - certificate_chain: { filename: "certs/servercert.pem" } private_key: { filename: "certs/serverkey.pem" } - # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. - # use_proxy_proto: true filters: - name: envoy.filters.network.http_connection_manager typed_config: diff --git a/docs/root/configuration/best_practices/_include/edge.yaml b/docs/root/configuration/best_practices/_include/edge.yaml index dc629699f0a8..21a6b7e7a5c1 100644 --- a/docs/root/configuration/best_practices/_include/edge.yaml +++ b/docs/root/configuration/best_practices/_include/edge.yaml @@ -34,6 +34,10 @@ static_resources: listener_filters: - name: "envoy.filters.listener.tls_inspector" typed_config: {} + # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. 
+ # - name: envoy.filters.listener.proxy_protocol + # typed_config: + # "@type": type.googleapis.com/envoy.extensions.filters.listener.proxy_protocol.v3.ProxyProtocol per_connection_buffer_limit_bytes: 32768 # 32 KiB filter_chains: - filter_chain_match: @@ -46,8 +50,6 @@ static_resources: tls_certificates: - certificate_chain: { filename: "certs/servercert.pem" } private_key: { filename: "certs/serverkey.pem" } - # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. - # use_proxy_proto: true filters: - name: envoy.filters.network.http_connection_manager typed_config: diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index fa3993706b6a..143def096e48 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -506,7 +506,7 @@ Supported variable names are: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`Proxy Protocol filter ` or :ref:`x-forwarded-for `. %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% diff --git a/docs/root/configuration/observability/access_log/usage.rst b/docs/root/configuration/observability/access_log/usage.rst index 73ca330f7333..7d4b402fbd61 100644 --- a/docs/root/configuration/observability/access_log/usage.rst +++ b/docs/root/configuration/observability/access_log/usage.rst @@ -322,7 +322,7 @@ The following command operators are supported: .. note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`Proxy Protocol filter ` or :ref:`x-forwarded-for `. %DOWNSTREAM_REMOTE_ADDRESS_WITHOUT_PORT% @@ -332,7 +332,7 @@ The following command operators are supported: .. 
note:: This may not be the physical remote address of the peer if the address has been inferred from - :ref:`proxy proto ` or :ref:`x-forwarded-for + :ref:`Proxy Protocol filter ` or :ref:`x-forwarded-for `. %DOWNSTREAM_DIRECT_REMOTE_ADDRESS% @@ -342,7 +342,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`Proxy Protocol filter ` or :ref:`x-forwarded-for `. %DOWNSTREAM_DIRECT_REMOTE_ADDRESS_WITHOUT_PORT% @@ -352,7 +352,7 @@ The following command operators are supported: .. note:: This is always the physical remote address of the peer even if the downstream remote address has - been inferred from :ref:`proxy proto ` + been inferred from :ref:`Proxy Protocol filter ` or :ref:`x-forwarded-for `. %DOWNSTREAM_LOCAL_ADDRESS% diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst index d86cb002aa59..9f6c8808d17e 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_runtime.rst @@ -48,12 +48,12 @@ outlier_detection.consecutive_local_origin_failure setting in outlier detection outlier_detection.interval_ms - :ref:`interval_ms + :ref:`interval ` setting in outlier detection outlier_detection.base_ejection_time_ms - :ref:`base_ejection_time_ms + :ref:`base_ejection_time ` setting in outlier detection @@ -127,6 +127,11 @@ outlier_detection.failure_percentage_threshold ` setting in outlier detection +outlier_detection.max_ejection_time_ms + :ref:`max_ejection_time + ` + setting in outlier detection + Core ---- diff --git a/docs/root/intro/arch_overview/upstream/outlier.rst b/docs/root/intro/arch_overview/upstream/outlier.rst index fd9dc7158a74..fcfb6f6da837 100644 --- 
a/docs/root/intro/arch_overview/upstream/outlier.rst +++ b/docs/root/intro/arch_overview/upstream/outlier.rst @@ -48,6 +48,8 @@ It is important to understand that a cluster may be shared among several filter ejects a host based on its outlier detection type, other filter chains will be also affected even though their outlier detection type would not have ejected that host. +.. _arch_overview_outlier_detection_algorithm: + Ejection algorithm ------------------ @@ -63,10 +65,19 @@ ejection algorithm works as follows: #. The host is ejected for some number of milliseconds. Ejection means that the host is marked unhealthy and will not be used during load balancing unless the load balancer is in a :ref:`panic ` scenario. The number of milliseconds - is equal to the :ref:`outlier_detection.base_ejection_time_ms + is equal to the :ref:`outlier_detection.base_ejection_time ` value - multiplied by the number of times the host has been ejected. This causes hosts to get ejected - for longer and longer periods if they continue to fail. + multiplied by the number of times the host has been ejected in a row. This causes hosts to get ejected + for longer and longer periods if they continue to fail. When ejection time reaches + :ref:`outlier_detection.max_ejection_time` it does not increase any more. + When the host becomes healthy, the ejection time + multiplier decreases with time. The host's health is checked at intervals equal to + :ref:`outlier_detection.interval`. + If the host is healthy during that check, the ejection time multiplier is decremented. Assuming that the host stays healthy + it would take approximately :ref:`outlier_detection.max_ejection_time` / + :ref:`outlier_detection.base_ejection_time` * + :ref:`outlier_detection.interval` seconds to bring down the ejection time to the minimum + value :ref:`outlier_detection.base_ejection_time`. #. An ejected host will automatically be brought back into service after the ejection time has been satisfied. 
Generally, outlier detection is used alongside :ref:`active health checking ` for a comprehensive health checking solution. diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index b8aec179a9cb..05f7ab64dfc8 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -25,6 +25,7 @@ Minor Behavior Changes * listener: injection of the :ref:`TLS inspector ` has been disabled by default. This feature is controlled by the runtime guard `envoy.reloadable_features.disable_tls_inspector_injection`. * memory: enable new tcmalloc with restartable sequences for aarch64 builds. * mongo proxy metrics: swapped network connection remote and local closed counters previously set reversed (`cx_destroy_local_with_active_rq` and `cx_destroy_remote_with_active_rq`). +* outlier detection: added :ref:`max_ejection_time ` to limit ejection time growth when a node stays unhealthy for extended period of time. By default :ref:`max_ejection_time ` limits ejection time to 5 minutes. Additionally, when the node stays healthy, ejection time decreases. See :ref:`ejection algorithm` for more info. Previously, ejection time could grow without limit and never decreased. * performance: improve performance when handling large HTTP/1 bodies. * tls: removed RSA key transport and SHA-1 cipher suites from the client-side defaults. * watchdog: the watchdog action :ref:`abort_action ` is now the default action to terminate the process if watchdog kill / multikill is enabled. @@ -39,6 +40,7 @@ Bug Fixes * config: validate that upgrade configs have a non-empty :ref:`upgrade_type `, fixing a bug where an errant "-" could result in unexpected behavior. * dns: fix a bug where custom resolvers provided in configuration were not preserved after network issues. * dns_filter: correctly associate DNS response IDs when multiple queries are received. 
+* grpc mux: fix sending node again after stream is reset when :ref:`set_node_on_first_message_only ` is set. * http: fixed URL parsing for HTTP/1.1 fully qualified URLs and connect requests containing IPv6 addresses. * http: reject requests with missing required headers after filter chain processing. * http: sending CONNECT_ERROR for HTTP/2 where appropriate during CONNECT requests. @@ -105,6 +107,7 @@ Deprecated * compression: the fields :ref:`content_length `, :ref:`content_type `, :ref:`disable_on_etag_header `, :ref:`remove_accept_encoding_header ` and :ref:`runtime_enabled ` of the :ref:`Compressor ` message have been deprecated in favor of :ref:`response_direction_config `. * formatter: :ref:`text_format ` is now deprecated in favor of :ref:`text_format_source `. To migrate existing text format strings, use the :ref:`inline_string ` field. * gzip: :ref:`HTTP Gzip filter ` is rejected now unless explicitly allowed with :ref:`runtime override ` `envoy.deprecated_features.allow_deprecated_gzip_http_filter` set to `true`. +* listener: :ref:`use_proxy_proto ` has been deprecated in favor of adding a :ref:`PROXY protocol listener filter ` explicitly. * logging: the `--log-format-prefix-with-location` option is removed. * ratelimit: the :ref:`dynamic metadata ` action is deprecated in favor of the more generic :ref:`metadata ` action. * stats: the `--use-fake-symbol-table` option is removed. diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 1f9860fe0395..cce56c9a1e33 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -584,9 +584,9 @@ message Cluster { } // [#not-implemented-hide:] - message PrefetchPolicy { + message PreconnectPolicy { // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream.
This is useful for high-QPS or latency-sensitive services. Prefetching + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting // will only be done if the upstream is healthy. // // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be @@ -595,46 +595,46 @@ message Cluster { // serve both the original and presumed follow-up stream. // // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections prefetched. + // active streams, there would be 100 connections in use, and 50 connections preconnected. // This might be a useful value for something like short lived single-use connections, // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue // in case of unexpected disconnects where the connection could not be reused. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight. This means in steady state if a connection is torn down, // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be - // prefetched. + // preconnected. // - // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can - // harm latency more than the prefetching helps. 
- google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. + google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; // Indicates how many many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). - // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // Unlike per_upstream_preconnect_ratio this preconnects across the upstream instances in a // cluster, doing best effort predictions of what upstream would be picked next and // pre-establishing a connection. // - // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this // cluster, one to the second on the assumption there will be a follow-up stream. // - // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // Preconnecting will be limited to one preconnect per configured upstream in the cluster. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for // connection establishment. 
// - // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, - // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each upstream. // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. - google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + google.protobuf.DoubleValue predictive_preconnect_ratio = 2 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } @@ -1027,8 +1027,8 @@ message Cluster { TrackClusterStats track_cluster_stats = 49; // [#not-implemented-hide:] - // Prefetch configuration for this cluster. - PrefetchPolicy prefetch_policy = 50; + // Preconnect configuration for this cluster. + PreconnectPolicy preconnect_policy = 50; // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate // connection pool for every downstream connection diff --git a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto index c0b4d5732db5..9bb5633e6269 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/outlier_detection.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 21] +// [#next-free-field: 22] message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.OutlierDetection"; @@ -34,7 +34,8 @@ message OutlierDetection { google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. 
The real time is equal to the - // base time multiplied by the number of times the host has been ejected. + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. // Defaults to 30000ms or 30s. google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; @@ -148,4 +149,9 @@ message OutlierDetection { // volume is lower than this setting, failure percentage-based ejection will not be performed for // this host. Defaults to 50. google.protobuf.UInt32Value failure_percentage_request_volume = 20; + + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. + // Defaults to 300000ms or 300s. + google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; } diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index 1ce3966eb132..172be74b46bc 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -590,12 +590,12 @@ message Cluster { } // [#not-implemented-hide:] - message PrefetchPolicy { + message PreconnectPolicy { option (udpa.annotations.versioning).previous_message_type = - "envoy.config.cluster.v3.Cluster.PrefetchPolicy"; + "envoy.config.cluster.v3.Cluster.PreconnectPolicy"; // Indicates how many streams (rounded up) can be anticipated per-upstream for each - // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching + // incoming stream. This is useful for high-QPS or latency-sensitive services. Preconnecting // will only be done if the upstream is healthy. // // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be @@ -604,46 +604,46 @@ message Cluster { // serve both the original and presumed follow-up stream. 
// // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100 - // active streams, there would be 100 connections in use, and 50 connections prefetched. + // active streams, there would be 100 connections in use, and 50 connections preconnected. // This might be a useful value for something like short lived single-use connections, // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more - // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue + // reasonable, where for every 100 connections, 5 preconnected connections would be in the queue // in case of unexpected disconnects where the connection could not be reused. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight. This means in steady state if a connection is torn down, // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be - // prefetched. + // preconnected. // - // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can - // harm latency more than the prefetching helps. - google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1 + // This is limited somewhat arbitrarily to 3 because preconnecting too aggressively can + // harm latency more than the preconnecting helps. + google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; // Indicates how many many streams (rounded up) can be anticipated across a cluster for each // stream, useful for low QPS services. This is currently supported for a subset of // deterministic non-hash-based load-balancing algorithms (weighted round robin, random). 
- // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a + // Unlike per_upstream_preconnect_ratio this preconnects across the upstream instances in a // cluster, doing best effort predictions of what upstream would be picked next and // pre-establishing a connection. // - // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first - // incoming stream, 2 connections will be prefetched - one to the first upstream for this + // For example if preconnecting is set to 2 for a round robin HTTP/2 cluster, on the first + // incoming stream, 2 connections will be preconnected - one to the first upstream for this // cluster, one to the second on the assumption there will be a follow-up stream. // - // Prefetching will be limited to one prefetch per configured upstream in the cluster. + // Preconnecting will be limited to one preconnect per configured upstream in the cluster. // // If this value is not set, or set explicitly to one, Envoy will fetch as many connections // as needed to serve streams in flight, so during warm up and in steady state if a connection - // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for + // is closed (and per_upstream_preconnect_ratio is not set), there will be a latency hit for // connection establishment. // - // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met, - // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream. + // If both this and preconnect_ratio are set, Envoy will make sure both predicted needs are met, + // basically preconnecting max(predictive-preconnect, per-upstream-preconnect), for each upstream. // TODO(alyssawilk) per LB docs and LB overview docs when unhiding. 
- google.protobuf.DoubleValue predictive_prefetch_ratio = 2 + google.protobuf.DoubleValue predictive_preconnect_ratio = 2 [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } @@ -1041,8 +1041,8 @@ message Cluster { TrackClusterStats track_cluster_stats = 49; // [#not-implemented-hide:] - // Prefetch configuration for this cluster. - PrefetchPolicy prefetch_policy = 50; + // Preconnect configuration for this cluster. + PreconnectPolicy preconnect_policy = 50; // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate // connection pool for every downstream connection diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto index 29a1e01270d9..9b2efeb53146 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/outlier_detection.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // See the :ref:`architecture overview ` for // more information on outlier detection. -// [#next-free-field: 21] +// [#next-free-field: 22] message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.OutlierDetection"; @@ -34,7 +34,8 @@ message OutlierDetection { google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}]; // The base time that a host is ejected for. The real time is equal to the - // base time multiplied by the number of times the host has been ejected. + // base time multiplied by the number of times the host has been ejected and is + // capped by :ref:`max_ejection_time`. // Defaults to 30000ms or 30s. 
google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}]; @@ -148,4 +149,9 @@ message OutlierDetection { // volume is lower than this setting, failure percentage-based ejection will not be performed for // this host. Defaults to 50. google.protobuf.UInt32Value failure_percentage_request_volume = 20; + + // The maximum time that a host is ejected for. See :ref:`base_ejection_time` + // for more information. + // Defaults to 300000ms or 300s. + google.protobuf.Duration max_ejection_time = 21 [(validate.rules).duration = {gt {}}]; } diff --git a/generated_api_shadow/envoy/config/core/v3/base.proto b/generated_api_shadow/envoy/config/core/v3/base.proto index 1184c89de6e2..807045fde4c9 100644 --- a/generated_api_shadow/envoy/config/core/v3/base.proto +++ b/generated_api_shadow/envoy/config/core/v3/base.proto @@ -329,10 +329,10 @@ message DataSource { string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + bytes inline_bytes = 2; // String inlined in the configuration. - string inline_string = 3 [(validate.rules).string = {min_len: 1}]; + string inline_string = 3; } } diff --git a/generated_api_shadow/envoy/config/core/v4alpha/base.proto b/generated_api_shadow/envoy/config/core/v4alpha/base.proto index 95ca4f77a2bc..78fb00882e2c 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/base.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/base.proto @@ -336,10 +336,10 @@ message DataSource { string filename = 1 [(validate.rules).string = {min_len: 1}]; // Bytes inlined in the configuration. - bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; + bytes inline_bytes = 2; // String inlined in the configuration. 
- string inline_string = 3 [(validate.rules).string = {min_len: 1}]; + string inline_string = 3; } } diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index cb44a81459d2..907f25b66304 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -215,7 +215,11 @@ message FilterChain { // load balancers including the AWS ELB support this option. If the option is // absent or set to false, Envoy will use the physical peer address of the // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. + google.protobuf.BoolValue use_proxy_proto = 4 [deprecated = true]; // [#not-implemented-hide:] filter chain metadata. core.v3.Metadata metadata = 5; diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto index e7fe84482475..7cc1956a1b42 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -222,7 +222,11 @@ message FilterChain { // load balancers including the AWS ELB support this option. If the option is // absent or set to false, Envoy will use the physical peer address of the // connection as the remote address. - google.protobuf.BoolValue use_proxy_proto = 4; + // + // This field is deprecated. Add a + // :ref:`PROXY protocol listener filter ` + // explicitly instead. + google.protobuf.BoolValue hidden_envoy_deprecated_use_proxy_proto = 4 [deprecated = true]; // [#not-implemented-hide:] filter chain metadata. 
core.v4alpha.Metadata metadata = 5; diff --git a/include/envoy/common/conn_pool.h b/include/envoy/common/conn_pool.h index c80e80db21ed..e05664288b67 100644 --- a/include/envoy/common/conn_pool.h +++ b/include/envoy/common/conn_pool.h @@ -70,12 +70,12 @@ class Instance { virtual Upstream::HostDescriptionConstSharedPtr host() const PURE; /** - * Prefetches an upstream connection, if existing connections do not meet both current and + * Creates an upstream connection, if existing connections do not meet both current and * anticipated load. * - * @return true if a connection was prefetched, false otherwise. + * @return true if a connection was preconnected, false otherwise. */ - virtual bool maybePrefetch(float prefetch_ratio) PURE; + virtual bool maybePreconnect(float preconnect_ratio) PURE; }; enum class PoolFailureReason { diff --git a/include/envoy/common/optref.h b/include/envoy/common/optref.h index cf51cdaa52ea..f96654f5938a 100644 --- a/include/envoy/common/optref.h +++ b/include/envoy/common/optref.h @@ -34,6 +34,54 @@ template struct OptRef : public absl::optionalhas_value()) { + T& ref = **this; + return &ref; + } + + return nullptr; + } + + /** + * Helper to convert a OptRef into a pointer. If the optional is not set, returns a nullptr. + */ + const T* ptr() const { + if (this->has_value()) { + const T& ref = **this; + return &ref; + } + + return nullptr; + } + + T& ref() { return **this; } + + const T& ref() const { return **this; } }; +/** + * Constructs an OptRef from the provided reference. + * @param ref the reference to wrap + * @return OptRef the wrapped reference + */ +template OptRef makeOptRef(T& ref) { return {ref}; } + +/** + * Constructs an OptRef from the provided pointer. 
+ * @param ptr the pointer to wrap + * @return OptRef the wrapped pointer, or absl::nullopt if the pointer is nullptr + */ +template OptRef makeOptRefFromPtr(T* ptr) { + if (ptr == nullptr) { + return {}; + } + + return {*ptr}; +} + } // namespace Envoy diff --git a/include/envoy/event/dispatcher.h b/include/envoy/event/dispatcher.h index 42048d138f68..599617e87d28 100644 --- a/include/envoy/event/dispatcher.h +++ b/include/envoy/event/dispatcher.h @@ -52,12 +52,67 @@ using PostCb = std::function; using PostCbSharedPtr = std::shared_ptr; /** - * Abstract event dispatching loop. + * Minimal interface to the dispatching loop used to create low-level primitives. See Dispatcher + * below for the full interface. */ -class Dispatcher { +class DispatcherBase { public: - virtual ~Dispatcher() = default; + virtual ~DispatcherBase() = default; + + /** + * Creates a file event that will signal when a file is readable or writable. On UNIX systems this + * can be used for any file like interface (files, sockets, etc.). + * @param fd supplies the fd to watch. + * @param cb supplies the callback to fire when the file is ready. + * @param trigger specifies whether to edge or level trigger. + * @param events supplies a logical OR of FileReadyType events that the file event should + * initially listen on. + */ + virtual FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, + uint32_t events) PURE; + /** + * Allocates a timer. @see Timer for docs on how to use the timer. + * @param cb supplies the callback to invoke when the timer fires. + */ + virtual Event::TimerPtr createTimer(TimerCb cb) PURE; + + /** + * Allocates a schedulable callback. @see SchedulableCallback for docs on how to use the wrapped + * callback. + * @param cb supplies the callback to invoke when the SchedulableCallback is triggered on the + * event loop. 
+ */ + virtual Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) PURE; + + /** + * Sets a tracked object, which is currently operating in this Dispatcher. + * This should be cleared with another call to setTrackedObject() when the object is done doing + * work. Calling setTrackedObject(nullptr) results in no object being tracked. + * + * This is optimized for performance, to avoid allocation where we do scoped object tracking. + * + * @return The previously tracked object or nullptr if there was none. + */ + virtual const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) PURE; + + /** + * Validates that an operation is thread-safe with respect to this dispatcher; i.e. that the + * current thread of execution is on the same thread upon which the dispatcher loop is running. + */ + virtual bool isThreadSafe() const PURE; + + /** + * Returns a recently cached MonotonicTime value. + */ + virtual MonotonicTime approximateMonotonicTime() const PURE; +}; + +/** + * Abstract event dispatching loop. + */ +class Dispatcher : public DispatcherBase { +public: /** * Returns the name that identifies this dispatcher, such as "worker_2" or "main_thread". * @return const std::string& the name that identifies this dispatcher. @@ -136,18 +191,6 @@ class Dispatcher { createDnsResolver(const std::vector& resolvers, bool use_tcp_for_dns_lookups) PURE; - /** - * Creates a file event that will signal when a file is readable or writable. On UNIX systems this - * can be used for any file like interface (files, sockets, etc.). - * @param fd supplies the fd to watch. - * @param cb supplies the callback to fire when the file is ready. - * @param trigger specifies whether to edge or level trigger. - * @param events supplies a logical OR of FileReadyType events that the file event should - * initially listen on. 
- */ - virtual FileEventPtr createFileEvent(os_fd_t fd, FileReadyCb cb, FileTriggerType trigger, - uint32_t events) PURE; - /** * @return Filesystem::WatcherPtr a filesystem watcher owned by the caller. */ @@ -173,20 +216,6 @@ class Dispatcher { */ virtual Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb) PURE; - /** - * Allocates a timer. @see Timer for docs on how to use the timer. - * @param cb supplies the callback to invoke when the timer fires. - */ - virtual Event::TimerPtr createTimer(TimerCb cb) PURE; - - /** - * Allocates a schedulable callback. @see SchedulableCallback for docs on how to use the wrapped - * callback. - * @param cb supplies the callback to invoke when the SchedulableCallback is triggered on the - * event loop. - */ - virtual Event::SchedulableCallbackPtr createSchedulableCallback(std::function cb) PURE; - /** * Submits an item for deferred delete. @see DeferredDeletable. */ @@ -236,28 +265,6 @@ class Dispatcher { */ virtual Buffer::WatermarkFactory& getWatermarkFactory() PURE; - /** - * Sets a tracked object, which is currently operating in this Dispatcher. - * This should be cleared with another call to setTrackedObject() when the object is done doing - * work. Calling setTrackedObject(nullptr) results in no object being tracked. - * - * This is optimized for performance, to avoid allocation where we do scoped object tracking. - * - * @return The previously tracked object or nullptr if there was none. - */ - virtual const ScopeTrackedObject* setTrackedObject(const ScopeTrackedObject* object) PURE; - - /** - * Validates that an operation is thread-safe with respect to this dispatcher; i.e. that the - * current thread of execution is on the same thread upon which the dispatcher loop is running. - */ - virtual bool isThreadSafe() const PURE; - - /** - * Returns a recently cached MonotonicTime value. 
- */ - virtual MonotonicTime approximateMonotonicTime() const PURE; - /** * Updates approximate monotonic time to current value. */ diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h index 06bbe4f7a91b..8df40a404e82 100644 --- a/include/envoy/http/header_map.h +++ b/include/envoy/http/header_map.h @@ -8,6 +8,7 @@ #include #include +#include "envoy/common/optref.h" #include "envoy/common/pure.h" #include "common/common/assert.h" @@ -748,16 +749,15 @@ class RequestHeaderMap INLINE_REQ_HEADERS(DEFINE_INLINE_HEADER) }; using RequestHeaderMapPtr = std::unique_ptr; -using RequestHeaderMapOptRef = absl::optional>; -using RequestHeaderMapOptConstRef = absl::optional>; -using RequestHeaderMapOptRef = absl::optional>; +using RequestHeaderMapOptRef = OptRef; +using RequestHeaderMapOptConstRef = OptRef; // Request trailers. class RequestTrailerMap : public HeaderMap, public CustomInlineHeaderBase {}; using RequestTrailerMapPtr = std::unique_ptr; -using RequestTrailerMapOptRef = absl::optional>; +using RequestTrailerMapOptRef = OptRef; // Base class for both response headers and trailers. class ResponseHeaderOrTrailerMap { @@ -776,9 +776,8 @@ class ResponseHeaderMap INLINE_RESP_HEADERS(DEFINE_INLINE_HEADER) }; using ResponseHeaderMapPtr = std::unique_ptr; -using ResponseHeaderMapOptRef = absl::optional>; -using ResponseHeaderMapOptConstRef = - absl::optional>; +using ResponseHeaderMapOptRef = OptRef; +using ResponseHeaderMapOptConstRef = OptRef; // Response trailers. class ResponseTrailerMap @@ -786,7 +785,7 @@ class ResponseTrailerMap public HeaderMap, public CustomInlineHeaderBase {}; using ResponseTrailerMapPtr = std::unique_ptr; -using ResponseTrailerMapOptRef = absl::optional>; +using ResponseTrailerMapOptRef = OptRef; /** * Convenient container type for storing Http::LowerCaseString and std::string key/value pairs. 
diff --git a/include/envoy/server/listener_manager.h b/include/envoy/server/listener_manager.h index d76d73cf315e..01fe285b1c86 100644 --- a/include/envoy/server/listener_manager.h +++ b/include/envoy/server/listener_manager.h @@ -202,8 +202,9 @@ class ListenerManager { /** * Start all workers accepting new connections on all added listeners. * @param guard_dog supplies the guard dog to use for thread watching. + * @param callback supplies the callback to complete server initialization. */ - virtual void startWorkers(GuardDog& guard_dog) PURE; + virtual void startWorkers(GuardDog& guard_dog, std::function callback) PURE; /** * Stop all listeners from accepting new connections without actually removing any of them. This diff --git a/include/envoy/tracing/http_tracer.h b/include/envoy/tracing/http_tracer.h index 22b024ac97e0..461ff351c27d 100644 --- a/include/envoy/tracing/http_tracer.h +++ b/include/envoy/tracing/http_tracer.h @@ -174,6 +174,14 @@ class Span { * @param key baggage value */ virtual void setBaggage(absl::string_view key, absl::string_view value) PURE; + + /** + * Retrieve the trace ID associated with this span. + * The trace id may be generated for this span, propagated by parent spans, or + * not created yet. + * @return trace ID as a hex string + */ + virtual std::string getTraceIdAsHex() const PURE; }; /** diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 64e309e62f96..b9de4977659b 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -743,7 +743,7 @@ class ClusterInfo { /** * @return how many streams should be anticipated per each current stream. */ - virtual float perUpstreamPrefetchRatio() const PURE; + virtual float perUpstreamPreconnectRatio() const PURE; /** * @return how many streams should be anticipated per each current stream. 
diff --git a/source/common/common/dump_state_utils.h b/source/common/common/dump_state_utils.h index be6c24885813..704ea271a5c4 100644 --- a/source/common/common/dump_state_utils.h +++ b/source/common/common/dump_state_utils.h @@ -20,7 +20,7 @@ namespace Envoy { #define DUMP_DETAILS(member) \ do { \ os << spaces << #member ": "; \ - if ((member) != nullptr) { \ + if (member) { \ os << "\n"; \ (member)->dumpState(os, indent_level + 1); \ } else { \ diff --git a/source/common/config/datasource.cc b/source/common/config/datasource.cc index 776061a61be2..3f60da031a28 100644 --- a/source/common/config/datasource.cc +++ b/source/common/config/datasource.cc @@ -15,20 +15,27 @@ static constexpr uint32_t RetryCount = 1; std::string read(const envoy::config::core::v3::DataSource& source, bool allow_empty, Api::Api& api) { + std::string data; switch (source.specifier_case()) { case envoy::config::core::v3::DataSource::SpecifierCase::kFilename: - return api.fileSystem().fileReadToEnd(source.filename()); + data = api.fileSystem().fileReadToEnd(source.filename()); + break; case envoy::config::core::v3::DataSource::SpecifierCase::kInlineBytes: - return source.inline_bytes(); + data = source.inline_bytes(); + break; case envoy::config::core::v3::DataSource::SpecifierCase::kInlineString: - return source.inline_string(); + data = source.inline_string(); + break; default: if (!allow_empty) { throw EnvoyException( fmt::format("Unexpected DataSource::specifier_case(): {}", source.specifier_case())); } - return ""; } + if (!allow_empty && data.empty()) { + throw EnvoyException("DataSource cannot be empty"); + } + return data; } absl::optional getPath(const envoy::config::core::v3::DataSource& source) { diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index c45aab8d0f56..2734867446fe 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -48,8 +48,13 @@ void GrpcMuxImpl::sendDiscoveryRequest(const 
std::string& type_url) { } } - if (skip_subsequent_node_ && !first_stream_request_) { - request.clear_node(); + if (skip_subsequent_node_) { + if (first_stream_request_) { + // Node may have been cleared during a previous request. + request.mutable_node()->MergeFrom(local_info_.node()); + } else { + request.clear_node(); + } } VersionConverter::prepareMessageForGrpcWire(request, transport_api_version_); ENVOY_LOG(trace, "Sending DiscoveryRequest for {}: {}", type_url, request.DebugString()); diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index b9667b77525e..a5df5aadc0fd 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -35,49 +35,49 @@ void ConnPoolImplBase::destructAllConnections() { dispatcher_.clearDeferredDeleteList(); } -bool ConnPoolImplBase::shouldCreateNewConnection(float global_prefetch_ratio) const { +bool ConnPoolImplBase::shouldCreateNewConnection(float global_preconnect_ratio) const { // If the host is not healthy, don't make it do extra work, especially as // upstream selection logic may result in bypassing this upstream entirely. - // If an Envoy user wants prefetching for degraded upstreams this could be - // added later via extending the prefetch config. + // If an Envoy user wants preconnecting for degraded upstreams this could be + // added later via extending the preconnect config. if (host_->health() != Upstream::Host::Health::Healthy) { return pending_streams_.size() > connecting_stream_capacity_; } - // If global prefetching is on, and this connection is within the global - // prefetch limit, prefetch. - // We may eventually want to track prefetch_attempts to allow more prefetching for + // If global preconnecting is on, and this connection is within the global + // preconnect limit, preconnect. + // We may eventually want to track preconnect_attempts to allow more preconnecting for // heavily weighted upstreams or sticky picks. 
- if (global_prefetch_ratio > 1.0 && - ((pending_streams_.size() + 1 + num_active_streams_) * global_prefetch_ratio > + if (global_preconnect_ratio > 1.0 && + ((pending_streams_.size() + 1 + num_active_streams_) * global_preconnect_ratio > (connecting_stream_capacity_ + num_active_streams_))) { return true; } // The number of streams we want to be provisioned for is the number of - // pending and active streams times the prefetch ratio. + // pending and active streams times the preconnect ratio. // The number of streams we are (theoretically) provisioned for is the // connecting stream capacity plus the number of active streams. // - // If prefetch ratio is not set, it defaults to 1, and this simplifies to the + // If preconnect ratio is not set, it defaults to 1, and this simplifies to the // legacy value of pending_streams_.size() > connecting_stream_capacity_ - return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() > + return (pending_streams_.size() + num_active_streams_) * perUpstreamPreconnectRatio() > (connecting_stream_capacity_ + num_active_streams_); } -float ConnPoolImplBase::perUpstreamPrefetchRatio() const { - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_prefetch")) { - return host_->cluster().perUpstreamPrefetchRatio(); +float ConnPoolImplBase::perUpstreamPreconnectRatio() const { + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.allow_preconnect")) { + return host_->cluster().perUpstreamPreconnectRatio(); } else { return 1.0; } } void ConnPoolImplBase::tryCreateNewConnections() { - // Somewhat arbitrarily cap the number of connections prefetched due to new - // incoming connections. The prefetch ratio is capped at 3, so in steady - // state, no more than 3 connections should be prefetched. If hosts go - // unhealthy, and connections are not immediately prefetched, it could be that + // Somewhat arbitrarily cap the number of connections preconnected due to new + // incoming connections. 
The preconnect ratio is capped at 3, so in steady + // state, no more than 3 connections should be preconnected. If hosts go + // unhealthy, and connections are not immediately preconnected, it could be that // many connections are desired when the host becomes healthy again, but // overwhelming it with connections is not desirable. for (int i = 0; i < 3; ++i) { @@ -87,9 +87,9 @@ void ConnPoolImplBase::tryCreateNewConnections() { } } -bool ConnPoolImplBase::tryCreateNewConnection(float global_prefetch_ratio) { +bool ConnPoolImplBase::tryCreateNewConnection(float global_preconnect_ratio) { // There are already enough CONNECTING connections for the number of queued streams. - if (!shouldCreateNewConnection(global_prefetch_ratio)) { + if (!shouldCreateNewConnection(global_preconnect_ratio)) { return false; } @@ -189,7 +189,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) ActiveClient& client = *ready_clients_.front(); ENVOY_CONN_LOG(debug, "using existing connection", client); attachStreamToClient(client, context); - // Even if there's a ready client, we may want to prefetch a new connection + // Even if there's a ready client, we may want to preconnect a new connection // to handle the next incoming stream. tryCreateNewConnections(); return nullptr; @@ -211,8 +211,8 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) } } -bool ConnPoolImplBase::maybePrefetch(float global_prefetch_ratio) { - return tryCreateNewConnection(global_prefetch_ratio); +bool ConnPoolImplBase::maybePreconnect(float global_preconnect_ratio) { + return tryCreateNewConnection(global_preconnect_ratio); } void ConnPoolImplBase::scheduleOnUpstreamReady() { @@ -368,7 +368,7 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view // NOTE: We move the existing pending streams to a temporary list. This is done so that // if retry logic submits a new stream to the pool, we don't fail it inline. 
purgePendingStreams(client.real_host_description_, failure_reason, reason); - // See if we should prefetch another connection based on active connections. + // See if we should preconnect another connection based on active connections. tryCreateNewConnections(); } @@ -436,14 +436,14 @@ void ConnPoolImplBase::purgePendingStreams( bool ConnPoolImplBase::connectingConnectionIsExcess() const { ASSERT(connecting_stream_capacity_ >= connecting_clients_.front()->effectiveConcurrentStreamLimit()); - // If perUpstreamPrefetchRatio is one, this simplifies to checking if there would still be + // If perUpstreamPreconnectRatio is one, this simplifies to checking if there would still be // sufficient connecting stream capacity to serve all pending streams if the most recent client // were removed from the picture. // - // If prefetch ratio is set, it also factors in the anticipated load based on both queued streams - // and active streams, and makes sure the connecting capacity would still be sufficient to serve - // that even with the most recent client removed. - return (pending_streams_.size() + num_active_streams_) * perUpstreamPrefetchRatio() <= + // If preconnect ratio is set, it also factors in the anticipated load based on both queued + // streams and active streams, and makes sure the connecting capacity would still be sufficient to + // serve that even with the most recent client removed. 
+ return (pending_streams_.size() + num_active_streams_) * perUpstreamPreconnectRatio() <= (connecting_stream_capacity_ - connecting_clients_.front()->effectiveConcurrentStreamLimit() + num_active_streams_); } diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index 9c911f3948de..c2d8ef7da67e 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -159,8 +159,8 @@ class ConnPoolImplBase : protected Logger::Loggable { void scheduleOnUpstreamReady(); ConnectionPool::Cancellable* newStream(AttachContext& context); // Called if this pool is likely to be picked soon, to determine if it's worth - // prefetching a connection. - bool maybePrefetch(float global_prefetch_ratio); + // preconnecting a connection. + bool maybePreconnect(float global_preconnect_ratio); virtual ConnectionPool::Cancellable* newPendingStream(AttachContext& context) PURE; @@ -187,24 +187,24 @@ class ConnPoolImplBase : protected Logger::Loggable { protected: virtual void onConnected(Envoy::ConnectionPool::ActiveClient&) {} - // Creates up to 3 connections, based on the prefetch ratio. + // Creates up to 3 connections, based on the preconnect ratio. void tryCreateNewConnections(); // Creates a new connection if there is sufficient demand, it is allowed by resourceManager, or // to avoid starving this pool. - // Demand is determined either by perUpstreamPrefetchRatio() or global_prefetch_ratio - // if this is called by maybePrefetch() - bool tryCreateNewConnection(float global_prefetch_ratio = 0); + // Demand is determined either by perUpstreamPreconnectRatio() or global_preconnect_ratio + // if this is called by maybePreconnect() + bool tryCreateNewConnection(float global_preconnect_ratio = 0); // A helper function which determines if a canceled pending connection should // be closed as excess or not. 
bool connectingConnectionIsExcess() const; // A helper function which determines if a new incoming stream should trigger - // connection prefetch. - bool shouldCreateNewConnection(float global_prefetch_ratio) const; + // connection preconnect. + bool shouldCreateNewConnection(float global_preconnect_ratio) const; - float perUpstreamPrefetchRatio() const; + float perUpstreamPreconnectRatio() const; ConnectionPool::Cancellable* addPendingStream(Envoy::ConnectionPool::PendingStreamPtr&& pending_stream) { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 828f8ceb30e4..4872cd1a7551 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -10,6 +10,7 @@ #include #include "envoy/access_log/access_log.h" +#include "envoy/common/optref.h" #include "envoy/common/random_generator.h" #include "envoy/common/scope_tracker.h" #include "envoy/common/time.h" @@ -229,22 +230,20 @@ class ConnectionManagerImpl : Logger::Loggable, } void chargeStats(const ResponseHeaderMap& headers) override; - // TODO(snowp): Create shared OptRef/OptConstRef helpers Http::RequestHeaderMapOptRef requestHeaders() override { - return request_headers_ ? absl::make_optional(std::ref(*request_headers_)) : absl::nullopt; + return makeOptRefFromPtr(request_headers_.get()); } Http::RequestTrailerMapOptRef requestTrailers() override { - return request_trailers_ ? absl::make_optional(std::ref(*request_trailers_)) : absl::nullopt; + return makeOptRefFromPtr(request_trailers_.get()); } Http::ResponseHeaderMapOptRef continueHeaders() override { - return continue_headers_ ? absl::make_optional(std::ref(*continue_headers_)) : absl::nullopt; + return makeOptRefFromPtr(continue_headers_.get()); } Http::ResponseHeaderMapOptRef responseHeaders() override { - return response_headers_ ? 
absl::make_optional(std::ref(*response_headers_)) : absl::nullopt; + return makeOptRefFromPtr(response_headers_.get()); } Http::ResponseTrailerMapOptRef responseTrailers() override { - return response_trailers_ ? absl::make_optional(std::ref(*response_trailers_)) - : absl::nullopt; + return makeOptRefFromPtr(response_trailers_.get()); } void endStream() override { diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 29290e74f496..47701fbf23db 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -61,8 +61,8 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, Http::ConnectionPool::Callbacks& callbacks) override; - bool maybePrefetch(float ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(ratio); + bool maybePreconnect(float ratio) override { + return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(ratio); } bool hasActiveConnections() const override; diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index f0b7618794f4..fe579cebefdd 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -477,6 +477,9 @@ void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHead ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "decodeHeaders when end_stream is already false"); + ENVOY_BUG( + !state_.local_complete_ || status == FilterHeadersStatus::StopIteration, + "Filters should return FilterHeadersStatus::StopIteration after sending a local reply."); state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders; ENVOY_STREAM_LOG(trace, "decode headers 
called: filter={} status={}", *this, @@ -484,7 +487,8 @@ void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHead (*entry)->decode_headers_called_ = true; - const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); + const auto continue_iteration = + (*entry)->commonHandleAfterHeadersCallback(status, end_stream) && !state_.local_complete_; // If this filter ended the stream, decodeComplete() should be called for it. if ((*entry)->end_stream_) { @@ -844,16 +848,14 @@ void FilterManager::sendLocalReplyViaFilterChain( absl::string_view& content_type) -> void { // TODO(snowp): This &get() business isn't nice, rework LocalReply and others to accept // opt refs. - local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().has_value() - ? &filter_manager_callbacks_.requestHeaders()->get() - : nullptr, - response_headers, stream_info_, code, body, content_type); + local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(), response_headers, + stream_info_, code, body, content_type); }, [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { filter_manager_callbacks_.setResponseHeaders(std::move(headers)); // TODO: Start encoding from the last decoder filter that saw the // request instead. - encodeHeaders(nullptr, filter_manager_callbacks_.responseHeaders()->get(), end_stream); + encodeHeaders(nullptr, filter_manager_callbacks_.responseHeaders().ref(), end_stream); }, [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the @@ -885,10 +887,8 @@ void FilterManager::sendDirectLocalReply( }, [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { - local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().has_value() - ? 
&filter_manager_callbacks_.requestHeaders()->get() - : nullptr, - response_headers, stream_info_, code, body, content_type); + local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(), response_headers, + stream_info_, code, body, content_type); }, [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { // Move the response headers into the FilterManager to make sure they're visible to @@ -1248,11 +1248,11 @@ bool FilterManager::createFilterChain() { bool upgrade_rejected = false; const HeaderEntry* upgrade = nullptr; if (filter_manager_callbacks_.requestHeaders()) { - upgrade = filter_manager_callbacks_.requestHeaders()->get().Upgrade(); + upgrade = filter_manager_callbacks_.requestHeaders()->Upgrade(); // Treat CONNECT requests as a special upgrade case. if (!upgrade && HeaderUtility::isConnect(*filter_manager_callbacks_.requestHeaders())) { - upgrade = filter_manager_callbacks_.requestHeaders()->get().Method(); + upgrade = filter_manager_callbacks_.requestHeaders()->Method(); } } diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 21109a82d774..2493bd2edaff 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -2,6 +2,7 @@ #include +#include "envoy/common/optref.h" #include "envoy/extensions/filters/common/matcher/action/v3/skip_action.pb.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" @@ -35,19 +36,11 @@ class HttpMatchingDataImpl : public HttpMatchingData { } Http::RequestHeaderMapOptConstRef requestHeaders() const override { - if (request_headers_) { - return absl::make_optional(std::cref(*request_headers_)); - } - - return absl::nullopt; + return makeOptRefFromPtr(request_headers_); } Http::ResponseHeaderMapOptConstRef responseHeaders() const override { - if (response_headers_) { - return absl::make_optional(std::cref(*response_headers_)); - } - - return absl::nullopt; + return makeOptRefFromPtr(response_headers_); } private: @@ 
-630,10 +623,10 @@ class FilterManager : public ScopeTrackedObject, const char* spaces = spacesForLevel(indent_level); os << spaces << "FilterManager " << this << DUMP_MEMBER(state_.has_continue_headers_) << "\n"; - DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestHeaders()); - DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.requestTrailers()); - DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.responseHeaders()); - DUMP_OPT_REF_DETAILS(filter_manager_callbacks_.responseTrailers()); + DUMP_DETAILS(filter_manager_callbacks_.requestHeaders()); + DUMP_DETAILS(filter_manager_callbacks_.requestTrailers()); + DUMP_DETAILS(filter_manager_callbacks_.responseHeaders()); + DUMP_DETAILS(filter_manager_callbacks_.responseTrailers()); DUMP_DETAILS(&stream_info_); } @@ -689,15 +682,15 @@ class FilterManager : public ScopeTrackedObject, void log() { RequestHeaderMap* request_headers = nullptr; if (filter_manager_callbacks_.requestHeaders()) { - request_headers = &filter_manager_callbacks_.requestHeaders()->get(); + request_headers = filter_manager_callbacks_.requestHeaders().ptr(); } ResponseHeaderMap* response_headers = nullptr; if (filter_manager_callbacks_.responseHeaders()) { - response_headers = &filter_manager_callbacks_.responseHeaders()->get(); + response_headers = filter_manager_callbacks_.responseHeaders().ptr(); } ResponseTrailerMap* response_trailers = nullptr; if (filter_manager_callbacks_.responseTrailers()) { - response_trailers = &filter_manager_callbacks_.responseTrailers()->get(); + response_trailers = filter_manager_callbacks_.responseTrailers().ptr(); } for (const auto& log_handler : access_log_handlers_) { @@ -822,11 +815,11 @@ class FilterManager : public ScopeTrackedObject, void requestHeadersInitialized() { if (Http::Headers::get().MethodValues.Head == - filter_manager_callbacks_.requestHeaders()->get().getMethodValue()) { + filter_manager_callbacks_.requestHeaders()->getMethodValue()) { state_.is_head_request_ = true; } state_.is_grpc_request_ = - 
Grpc::Common::isGrpcRequestHeaders(filter_manager_callbacks_.requestHeaders()->get()); + Grpc::Common::isGrpcRequestHeaders(filter_manager_callbacks_.requestHeaders().ref()); } /** diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 5eba16d4a6ee..a91658184f78 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -59,7 +59,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.always_apply_route_header_rules", "envoy.reloadable_features.activate_timers_next_event_loop", "envoy.reloadable_features.allow_500_after_100", - "envoy.reloadable_features.allow_prefetch", + "envoy.reloadable_features.allow_preconnect", "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.consume_all_retry_headers", "envoy.reloadable_features.check_ocsp_policy", diff --git a/source/common/stats/symbol_table_impl.cc b/source/common/stats/symbol_table_impl.cc index 8b6479c6d840..4d4f2e7ca45a 100644 --- a/source/common/stats/symbol_table_impl.cc +++ b/source/common/stats/symbol_table_impl.cc @@ -243,10 +243,16 @@ void SymbolTableImpl::incRefCount(const StatName& stat_name) { Thread::LockGuard lock(lock_); for (Symbol symbol : symbols) { auto decode_search = decode_map_.find(symbol); - ASSERT(decode_search != decode_map_.end()); + ASSERT(decode_search != decode_map_.end(), + "Please see " + "https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#" + "debugging-symbol-table-asserts"); auto encode_search = encode_map_.find(decode_search->second->toStringView()); - ASSERT(encode_search != encode_map_.end()); + ASSERT(encode_search != encode_map_.end(), + "Please see " + "https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#" + "debugging-symbol-table-asserts"); ++encode_search->second.ref_count_; } diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index 1022b4f7473c..90426c794c49 100644 
--- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -31,7 +31,7 @@ ActiveTcpClient::ActiveTcpClient(Envoy::ConnectionPool::ConnPoolImplBase& parent host->cluster().stats().upstream_cx_tx_bytes_total_, host->cluster().stats().upstream_cx_tx_bytes_buffered_, &host->cluster().stats().bind_errors_, nullptr}); - + connection_->noDelay(true); connection_->connect(); } @@ -67,6 +67,9 @@ void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { if (event == Network::ConnectionEvent::Connected) { connection_->streamInfo().setDownstreamSslConnection(connection_->ssl()); } else { + if (tcp_connection_data_) { + Envoy::Upstream::reportUpstreamCxDestroyActiveRequest(parent_.host(), event); + } callbacks_->onEvent(event); // After receiving a disconnect event, the owner of callbacks_ will likely self-destruct. // Clear the pointer to avoid using it again. diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index 2aa59ea95b14..7b8599a89084 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -157,8 +157,8 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, TcpAttachContext context(&callbacks); return Envoy::ConnectionPool::ConnPoolImplBase::newStream(context); } - bool maybePrefetch(float prefetch_ratio) override { - return Envoy::ConnectionPool::ConnPoolImplBase::maybePrefetch(prefetch_ratio); + bool maybePreconnect(float preconnect_ratio) override { + return Envoy::ConnectionPool::ConnPoolImplBase::maybePreconnect(preconnect_ratio); } ConnectionPool::Cancellable* diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h index e17a4bb2ac38..b10b6b0b8d41 100644 --- a/source/common/tcp/original_conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -33,8 +33,8 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti void drainConnections() override; void closeConnections() override; ConnectionPool::Cancellable* 
newConnection(ConnectionPool::Callbacks& callbacks) override; - // The old pool does not implement prefetching. - bool maybePrefetch(float) override { return false; } + // The old pool does not implement preconnecting. + bool maybePreconnect(float) override { return false; } Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } protected: diff --git a/source/common/tracing/http_tracer_impl.h b/source/common/tracing/http_tracer_impl.h index 760b4ed2bf3e..7ebe45655d41 100644 --- a/source/common/tracing/http_tracer_impl.h +++ b/source/common/tracing/http_tracer_impl.h @@ -13,6 +13,7 @@ #include "envoy/type/tracing/v3/custom_tag.pb.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/empty_string.h" #include "common/config/metadata.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" @@ -170,7 +171,8 @@ class NullSpan : public Span { void finishSpan() override {} void injectContext(Http::RequestHeaderMap&) override {} void setBaggage(absl::string_view, absl::string_view) override {} - std::string getBaggage(absl::string_view) override { return std::string(); } + std::string getBaggage(absl::string_view) override { return EMPTY_STRING; } + std::string getTraceIdAsHex() const override { return EMPTY_STRING; } SpanPtr spawnChild(const Config&, const std::string&, SystemTime) override { return SpanPtr{new NullSpan()}; } diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 9a443640b408..ebf842ee95dd 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -844,27 +844,27 @@ ThreadLocalCluster* ClusterManagerImpl::getThreadLocalCluster(absl::string_view } } -void ClusterManagerImpl::maybePrefetch( +void ClusterManagerImpl::maybePreconnect( ThreadLocalClusterManagerImpl::ClusterEntry& cluster_entry, - std::function pick_prefetch_pool) { - // TODO(alyssawilk) As currently implemented, 
this will always just prefetch + std::function pick_preconnect_pool) { + // TODO(alyssawilk) As currently implemented, this will always just preconnect // one connection ahead of actually needed connections. // // Instead we want to track the following metrics across the entire connection - // pool and use the same algorithm we do for per-upstream prefetch: - // ((pending_streams_ + num_active_streams_) * global_prefetch_ratio > + // pool and use the same algorithm we do for per-upstream preconnect: + // ((pending_streams_ + num_active_streams_) * global_preconnect_ratio > // (connecting_stream_capacity_ + num_active_streams_))) - // and allow multiple prefetches per pick. - // Also cap prefetches such that - // num_unused_prefetch < num hosts - // since if we have more prefetches than hosts, we should consider kicking into - // per-upstream prefetch. + // and allow multiple preconnects per pick. + // Also cap preconnects such that + // num_unused_preconnect < num hosts + // since if we have more preconnects than hosts, we should consider kicking into + // per-upstream preconnect. // - // Once we do this, this should loop capped number of times while shouldPrefetch is true. + // Once we do this, this should loop capped number of times while shouldPreconnect is true. if (cluster_entry.cluster_info_->peekaheadRatio() > 1.0) { - ConnectionPool::Instance* prefetch_pool = pick_prefetch_pool(); - if (prefetch_pool) { - prefetch_pool->maybePrefetch(cluster_entry.cluster_info_->peekaheadRatio()); + ConnectionPool::Instance* preconnect_pool = pick_preconnect_pool(); + if (preconnect_pool) { + preconnect_pool->maybePreconnect(cluster_entry.cluster_info_->peekaheadRatio()); } } } @@ -876,13 +876,13 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPool( // Select a host and create a connection pool for it if it does not already exist. auto ret = connPool(priority, protocol, context, false); - // Now see if another host should be prefetched. 
+ // Now see if another host should be preconnected. // httpConnPool is called immediately before a call for newStream. newStream doesn't - // have the load balancer context needed to make selection decisions so prefetching must be + // have the load balancer context needed to make selection decisions so preconnecting must be // performed here in anticipation of the new stream. // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is // code-enforced. - maybePrefetch(*this, [this, &priority, &protocol, &context]() { + maybePreconnect(*this, [this, &priority, &protocol, &context]() { return connPool(priority, protocol, context, true); }); @@ -896,13 +896,13 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( auto ret = tcpConnPool(priority, context, false); // tcpConnPool is called immediately before a call for newConnection. newConnection - // doesn't have the load balancer context needed to make selection decisions so prefetching must + // doesn't have the load balancer context needed to make selection decisions so preconnecting must // be performed here in anticipation of the new connection. // TODO(alyssawilk) refactor to have one function call and return a pair, so this invariant is // code-enforced. - // Now see if another host should be prefetched. - maybePrefetch(*this, - [this, &priority, &context]() { return tcpConnPool(priority, context, true); }); + // Now see if another host should be preconnected. 
+ maybePreconnect(*this, + [this, &priority, &context]() { return tcpConnPool(priority, context, true); }); return ret; } diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index 2f3fc30f288d..3cd527ec55c1 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -562,8 +562,8 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable prefetch_pool); + static void maybePreconnect(ThreadLocalClusterManagerImpl::ClusterEntry& cluster_entry, + std::function preconnect_pool); ClusterManagerFactory& factory_; Runtime::Loader& runtime_; diff --git a/source/common/upstream/load_balancer_impl.cc b/source/common/upstream/load_balancer_impl.cc index da89730843f9..1297da36f163 100644 --- a/source/common/upstream/load_balancer_impl.cc +++ b/source/common/upstream/load_balancer_impl.cc @@ -829,8 +829,8 @@ HostConstSharedPtr EdfLoadBalancerBase::chooseHostOnce(LoadBalancerContext* cont HostConstSharedPtr LeastRequestLoadBalancer::unweightedHostPeek(const HostVector&, const HostsSource&) { - // LeastRequestLoadBalancer can not do deterministic prefetching, because - // any other thread might select the least-requested-host between prefetch and + // LeastRequestLoadBalancer can not do deterministic preconnecting, because + // any other thread might select the least-requested-host between preconnect and // host-pick, and change the rq_active checks. 
return nullptr; } diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index a5e6b6e96cef..0e1a3d992e7a 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -56,7 +56,7 @@ class OriginalDstCluster : public ClusterImplBase { // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; - // Prefetching is not implemented for OriginalDstCluster + // Preconnecting is not implemented for OriginalDstCluster HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } private: diff --git a/source/common/upstream/outlier_detection_impl.cc b/source/common/upstream/outlier_detection_impl.cc index e27ab5cca985..df2ed5d083aa 100644 --- a/source/common/upstream/outlier_detection_impl.cc +++ b/source/common/upstream/outlier_detection_impl.cc @@ -73,18 +73,17 @@ void DetectorHostMonitorImpl::putHttpResponseCode(uint64_t response_code) { return; } if (Http::CodeUtility::isGatewayError(response_code)) { - if (++consecutive_gateway_failure_ == detector->runtime().snapshot().getInteger( - "outlier_detection.consecutive_gateway_failure", - detector->config().consecutiveGatewayFailure())) { + if (++consecutive_gateway_failure_ == + detector->runtime().snapshot().getInteger( + ConsecutiveGatewayFailureRuntime, detector->config().consecutiveGatewayFailure())) { detector->onConsecutiveGatewayFailure(host_.lock()); } } else { consecutive_gateway_failure_ = 0; } - if (++consecutive_5xx_ == - detector->runtime().snapshot().getInteger("outlier_detection.consecutive_5xx", - detector->config().consecutive5xx())) { + if (++consecutive_5xx_ == detector->runtime().snapshot().getInteger( + Consecutive5xxRuntime, detector->config().consecutive5xx())) { detector->onConsecutive5xx(host_.lock()); } } else { @@ -188,7 +187,7 @@ void DetectorHostMonitorImpl::localOriginFailure() { local_origin_sr_monitor_.incTotalReqCounter(); if 
(++consecutive_local_origin_failure_ == detector->runtime().snapshot().getInteger( - "outlier_detection.consecutive_local_origin_failure", + ConsecutiveLocalOriginFailureRuntime, detector->config().consecutiveLocalOriginFailure())) { detector->onConsecutiveLocalOriginFailure(host_.lock()); } @@ -250,7 +249,9 @@ DetectorConfig::DetectorConfig(const envoy::config::cluster::v3::OutlierDetectio DEFAULT_ENFORCING_CONSECUTIVE_LOCAL_ORIGIN_FAILURE))), enforcing_local_origin_success_rate_(static_cast( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, enforcing_local_origin_success_rate, - DEFAULT_ENFORCING_LOCAL_ORIGIN_SUCCESS_RATE))) {} + DEFAULT_ENFORCING_LOCAL_ORIGIN_SUCCESS_RATE))), + max_ejection_time_ms_(static_cast( + PROTOBUF_GET_MS_OR_DEFAULT(config, max_ejection_time, DEFAULT_MAX_EJECTION_TIME_MS))) {} DetectorImpl::DetectorImpl(const Cluster& cluster, const envoy::config::cluster::v3::OutlierDetection& config, @@ -279,6 +280,11 @@ DetectorImpl::create(const Cluster& cluster, const envoy::config::cluster::v3::OutlierDetection& config, Event::Dispatcher& dispatcher, Runtime::Loader& runtime, TimeSource& time_source, EventLoggerSharedPtr event_logger) { + if (config.max_ejection_time() < config.base_ejection_time()) { + throw EnvoyException( + "outlier detector's max_ejection_time cannot be smaller than base_ejection_time"); + } + std::shared_ptr detector( new DetectorImpl(cluster, config, dispatcher, runtime, time_source, event_logger)); detector->initialize(cluster); @@ -321,20 +327,26 @@ void DetectorImpl::addHostMonitor(HostSharedPtr host) { void DetectorImpl::armIntervalTimer() { interval_timer_->enableTimer(std::chrono::milliseconds( - runtime_.snapshot().getInteger("outlier_detection.interval_ms", config_.intervalMs()))); + runtime_.snapshot().getInteger(IntervalMsRuntime, config_.intervalMs()))); } void DetectorImpl::checkHostForUneject(HostSharedPtr host, DetectorHostMonitorImpl* monitor, MonotonicTime now) { if 
(!host->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)) { + // Node seems to be healthy and was not ejected since the last check. + if (monitor->ejectTimeBackoff() != 0) { + monitor->ejectTimeBackoff()--; + } return; } - std::chrono::milliseconds base_eject_time = - std::chrono::milliseconds(runtime_.snapshot().getInteger( - "outlier_detection.base_ejection_time_ms", config_.baseEjectionTimeMs())); + const std::chrono::milliseconds base_eject_time = std::chrono::milliseconds( + runtime_.snapshot().getInteger(BaseEjectionTimeMsRuntime, config_.baseEjectionTimeMs())); + const std::chrono::milliseconds max_eject_time = std::chrono::milliseconds( + runtime_.snapshot().getInteger(MaxEjectionTimeMsRuntime, config_.maxEjectionTimeMs())); ASSERT(monitor->numEjections() > 0); - if ((base_eject_time * monitor->numEjections()) <= (now - monitor->lastEjectionTime().value())) { + if ((min(base_eject_time * monitor->ejectTimeBackoff(), max_eject_time)) <= + (now - monitor->lastEjectionTime().value())) { ejections_active_helper_.dec(); host->healthFlagClear(Host::HealthFlag::FAILED_OUTLIER_CHECK); // Reset the consecutive failure counters to avoid re-ejection on very few new errors due @@ -353,30 +365,26 @@ void DetectorImpl::checkHostForUneject(HostSharedPtr host, DetectorHostMonitorIm bool DetectorImpl::enforceEjection(envoy::data::cluster::v2alpha::OutlierEjectionType type) { switch (type) { case envoy::data::cluster::v2alpha::CONSECUTIVE_5XX: - return runtime_.snapshot().featureEnabled("outlier_detection.enforcing_consecutive_5xx", + return runtime_.snapshot().featureEnabled(EnforcingConsecutive5xxRuntime, config_.enforcingConsecutive5xx()); case envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE: - return runtime_.snapshot().featureEnabled( - "outlier_detection.enforcing_consecutive_gateway_failure", - config_.enforcingConsecutiveGatewayFailure()); + return runtime_.snapshot().featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, + 
config_.enforcingConsecutiveGatewayFailure()); case envoy::data::cluster::v2alpha::SUCCESS_RATE: - return runtime_.snapshot().featureEnabled("outlier_detection.enforcing_success_rate", + return runtime_.snapshot().featureEnabled(EnforcingSuccessRateRuntime, config_.enforcingSuccessRate()); case envoy::data::cluster::v2alpha::CONSECUTIVE_LOCAL_ORIGIN_FAILURE: - return runtime_.snapshot().featureEnabled( - "outlier_detection.enforcing_consecutive_local_origin_failure", - config_.enforcingConsecutiveLocalOriginFailure()); + return runtime_.snapshot().featureEnabled(EnforcingConsecutiveLocalOriginFailureRuntime, + config_.enforcingConsecutiveLocalOriginFailure()); case envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN: - return runtime_.snapshot().featureEnabled( - "outlier_detection.enforcing_local_origin_success_rate", - config_.enforcingLocalOriginSuccessRate()); + return runtime_.snapshot().featureEnabled(EnforcingLocalOriginSuccessRateRuntime, + config_.enforcingLocalOriginSuccessRate()); case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE: - return runtime_.snapshot().featureEnabled("outlier_detection.enforcing_failure_percentage", + return runtime_.snapshot().featureEnabled(EnforcingFailurePercentageRuntime, config_.enforcingFailurePercentage()); case envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE_LOCAL_ORIGIN: - return runtime_.snapshot().featureEnabled( - "outlier_detection.enforcing_failure_percentage_local_origin", - config_.enforcingFailurePercentageLocalOrigin()); + return runtime_.snapshot().featureEnabled(EnforcingFailurePercentageLocalOriginRuntime, + config_.enforcingFailurePercentageLocalOrigin()); default: // Checked by schema. 
NOT_REACHED_GCOVR_EXCL_LINE; @@ -449,8 +457,7 @@ void DetectorImpl::updateDetectedEjectionStats( void DetectorImpl::ejectHost(HostSharedPtr host, envoy::data::cluster::v2alpha::OutlierEjectionType type) { uint64_t max_ejection_percent = std::min( - 100, runtime_.snapshot().getInteger("outlier_detection.max_ejection_percent", - config_.maxEjectionPercent())); + 100, runtime_.snapshot().getInteger(MaxEjectionPercentRuntime, config_.maxEjectionPercent())); double ejected_percent = 100.0 * ejections_active_helper_.value() / host_monitors_.size(); // Note this is not currently checked per-priority level, so it is possible // for outlier detection to eject all hosts at any given priority level. @@ -464,6 +471,15 @@ void DetectorImpl::ejectHost(HostSharedPtr host, ejections_active_helper_.inc(); updateEnforcedEjectionStats(type); host_monitors_[host]->eject(time_source_.monotonicTime()); + const std::chrono::milliseconds base_eject_time = std::chrono::milliseconds( + runtime_.snapshot().getInteger(BaseEjectionTimeMsRuntime, config_.baseEjectionTimeMs())); + const std::chrono::milliseconds max_eject_time = std::chrono::milliseconds( + runtime_.snapshot().getInteger(MaxEjectionTimeMsRuntime, config_.maxEjectionTimeMs())); + if ((host_monitors_[host]->ejectTimeBackoff() * base_eject_time) < + (max_eject_time + base_eject_time)) { + host_monitors_[host]->ejectTimeBackoff()++; + } + runCallbacks(host); if (event_logger_) { event_logger_->logEject(host, *this, type, true); @@ -586,15 +602,13 @@ DetectorImpl::EjectionPair DetectorImpl::successRateEjectionThreshold( void DetectorImpl::processSuccessRateEjections( DetectorHostMonitor::SuccessRateMonitorType monitor_type) { uint64_t success_rate_minimum_hosts = runtime_.snapshot().getInteger( - "outlier_detection.success_rate_minimum_hosts", config_.successRateMinimumHosts()); + SuccessRateMinimumHostsRuntime, config_.successRateMinimumHosts()); uint64_t success_rate_request_volume = runtime_.snapshot().getInteger( - 
"outlier_detection.success_rate_request_volume", config_.successRateRequestVolume()); - uint64_t failure_percentage_minimum_hosts = - runtime_.snapshot().getInteger("outlier_detection.failure_percentage_minimum_hosts", - config_.failurePercentageMinimumHosts()); - uint64_t failure_percentage_request_volume = - runtime_.snapshot().getInteger("outlier_detection.failure_percentage_request_volume", - config_.failurePercentageRequestVolume()); + SuccessRateRequestVolumeRuntime, config_.successRateRequestVolume()); + uint64_t failure_percentage_minimum_hosts = runtime_.snapshot().getInteger( + FailurePercentageMinimumHostsRuntime, config_.failurePercentageMinimumHosts()); + uint64_t failure_percentage_request_volume = runtime_.snapshot().getInteger( + FailurePercentageRequestVolumeRuntime, config_.failurePercentageRequestVolume()); std::vector valid_success_rate_hosts; std::vector valid_failure_percentage_hosts; @@ -645,7 +659,7 @@ void DetectorImpl::processSuccessRateEjections( if (!valid_success_rate_hosts.empty() && valid_success_rate_hosts.size() >= success_rate_minimum_hosts) { const double success_rate_stdev_factor = - runtime_.snapshot().getInteger("outlier_detection.success_rate_stdev_factor", + runtime_.snapshot().getInteger(SuccessRateStdevFactorRuntime, config_.successRateStdevFactor()) / 1000.0; getSRNums(monitor_type) = successRateEjectionThreshold( @@ -667,7 +681,7 @@ void DetectorImpl::processSuccessRateEjections( if (!valid_failure_percentage_hosts.empty() && valid_failure_percentage_hosts.size() >= failure_percentage_minimum_hosts) { const double failure_percentage_threshold = runtime_.snapshot().getInteger( - "outlier_detection.failure_percentage_threshold", config_.failurePercentageThreshold()); + FailurePercentageThresholdRuntime, config_.failurePercentageThreshold()); for (const auto& host_success_rate_pair : valid_failure_percentage_hosts) { if ((100.0 - host_success_rate_pair.success_rate_) >= failure_percentage_threshold) { diff --git 
a/source/common/upstream/outlier_detection_impl.h b/source/common/upstream/outlier_detection_impl.h index dcaf3c638757..cf4b6adcdf44 100644 --- a/source/common/upstream/outlier_detection_impl.h +++ b/source/common/upstream/outlier_detection_impl.h @@ -144,6 +144,8 @@ class DetectorHostMonitorImpl : public DetectorHostMonitor { void eject(MonotonicTime ejection_time); void uneject(MonotonicTime ejection_time); + uint32_t& ejectTimeBackoff() { return eject_time_backoff_; } + void resetConsecutive5xx() { consecutive_5xx_ = 0; } void resetConsecutiveGatewayFailure() { consecutive_gateway_failure_ = 0; } void resetConsecutiveLocalOriginFailure() { consecutive_local_origin_failure_ = 0; } @@ -188,6 +190,10 @@ class DetectorHostMonitorImpl : public DetectorHostMonitor { absl::optional last_ejection_time_; absl::optional last_unejection_time_; uint32_t num_ejections_{}; + // Determines ejection time. Each time a node is ejected, + // the eject_time_backoff is incremented. The value is decremented + // each time the node was healthy and not ejected. + uint32_t eject_time_backoff_{}; // counters for externally generated failures std::atomic consecutive_5xx_{0}; @@ -242,6 +248,43 @@ struct DetectionStats { ALL_OUTLIER_DETECTION_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) }; +// Names used in runtime configuration. 
+constexpr absl::string_view MaxEjectionPercentRuntime = "outlier_detection.max_ejection_percent"; +constexpr absl::string_view ConsecutiveGatewayFailureRuntime = + "outlier_detection.consecutive_gateway_failure"; +constexpr absl::string_view Consecutive5xxRuntime = "outlier_detection.consecutive_5xx"; +constexpr absl::string_view ConsecutiveLocalOriginFailureRuntime = + "outlier_detection.consecutive_local_origin_failure"; +constexpr absl::string_view IntervalMsRuntime = "outlier_detection.interval_ms"; +constexpr absl::string_view BaseEjectionTimeMsRuntime = "outlier_detection.base_ejection_time_ms"; +constexpr absl::string_view MaxEjectionTimeMsRuntime = "outlier_detection.max_ejection_time_ms"; +constexpr absl::string_view EnforcingConsecutive5xxRuntime = + "outlier_detection.enforcing_consecutive_5xx"; +constexpr absl::string_view EnforcingConsecutiveGatewayFailureRuntime = + "outlier_detection.enforcing_consecutive_gateway_failure"; +constexpr absl::string_view EnforcingSuccessRateRuntime = + "outlier_detection.enforcing_success_rate"; +constexpr absl::string_view EnforcingConsecutiveLocalOriginFailureRuntime = + "outlier_detection.enforcing_consecutive_local_origin_failure"; +constexpr absl::string_view EnforcingLocalOriginSuccessRateRuntime = + "outlier_detection.enforcing_local_origin_success_rate"; +constexpr absl::string_view EnforcingFailurePercentageRuntime = + "outlier_detection.enforcing_failure_percentage"; +constexpr absl::string_view EnforcingFailurePercentageLocalOriginRuntime = + "outlier_detection.enforcing_failure_percentage_local_origin"; +constexpr absl::string_view SuccessRateMinimumHostsRuntime = + "outlier_detection.success_rate_minimum_hosts"; +constexpr absl::string_view SuccessRateRequestVolumeRuntime = + "outlier_detection.success_rate_request_volume"; +constexpr absl::string_view FailurePercentageMinimumHostsRuntime = + "outlier_detection.failure_percentage_minimum_hosts"; +constexpr absl::string_view 
FailurePercentageRequestVolumeRuntime = + "outlier_detection.failure_percentage_request_volume"; +constexpr absl::string_view SuccessRateStdevFactorRuntime = + "outlier_detection.success_rate_stdev_factor"; +constexpr absl::string_view FailurePercentageThresholdRuntime = + "outlier_detection.failure_percentage_threshold"; + /** * Configuration for the outlier detection. */ @@ -275,6 +318,7 @@ class DetectorConfig { return enforcing_consecutive_local_origin_failure_; } uint64_t enforcingLocalOriginSuccessRate() const { return enforcing_local_origin_success_rate_; } + uint64_t maxEjectionTimeMs() const { return max_ejection_time_ms_; } private: const uint64_t interval_ms_; @@ -297,6 +341,7 @@ class DetectorConfig { const uint64_t consecutive_local_origin_failure_; const uint64_t enforcing_consecutive_local_origin_failure_; const uint64_t enforcing_local_origin_success_rate_; + const uint64_t max_ejection_time_ms_; static const uint64_t DEFAULT_INTERVAL_MS = 10000; static const uint64_t DEFAULT_BASE_EJECTION_TIME_MS = 30000; @@ -317,6 +362,7 @@ class DetectorConfig { static const uint64_t DEFAULT_CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 5; static const uint64_t DEFAULT_ENFORCING_CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 100; static const uint64_t DEFAULT_ENFORCING_LOCAL_ORIGIN_SUCCESS_RATE = 100; + static const uint64_t DEFAULT_MAX_EJECTION_TIME_MS = 10 * DEFAULT_BASE_EJECTION_TIME_MS; }; /** diff --git a/source/common/upstream/thread_aware_lb_impl.h b/source/common/upstream/thread_aware_lb_impl.h index 8c02cd54284f..608ed58e3edc 100644 --- a/source/common/upstream/thread_aware_lb_impl.h +++ b/source/common/upstream/thread_aware_lb_impl.h @@ -73,7 +73,7 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL HostConstSharedPtr chooseHostOnce(LoadBalancerContext*) override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } - // Prefetch not implemented for hash based load balancing + // Preconnect not implemented for hash based load balancing HostConstSharedPtr 
peekAnotherHost(LoadBalancerContext*) override { return nullptr; } protected: @@ -97,7 +97,7 @@ class ThreadAwareLoadBalancerBase : public LoadBalancerBase, public ThreadAwareL // Upstream::LoadBalancer HostConstSharedPtr chooseHost(LoadBalancerContext* context) override; - // Prefetch not implemented for hash based load balancing + // Preconnect not implemented for hash based load balancing HostConstSharedPtr peekAnotherHost(LoadBalancerContext*) override { return nullptr; } ClusterStats& stats_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 78df761e6b40..dbea77ba8aed 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -743,10 +743,10 @@ ClusterInfoImpl::ClusterInfoImpl( Http::DEFAULT_MAX_HEADERS_COUNT))), connect_timeout_( std::chrono::milliseconds(PROTOBUF_GET_MS_REQUIRED(config, connect_timeout))), - per_upstream_prefetch_ratio_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( - config.prefetch_policy(), per_upstream_prefetch_ratio, 1.0)), - peekahead_ratio_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.prefetch_policy(), predictive_prefetch_ratio, 0)), + per_upstream_preconnect_ratio_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config.preconnect_policy(), per_upstream_preconnect_ratio, 1.0)), + peekahead_ratio_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config.preconnect_policy(), + predictive_preconnect_ratio, 0)), per_connection_buffer_limit_bytes_( PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, per_connection_buffer_limit_bytes, 1024 * 1024)), socket_matcher_(std::move(socket_matcher)), stats_scope_(std::move(stats_scope)), diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 818bbf496afe..3ff38c4b770d 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -548,7 +548,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idleTimeout() const override { return idle_timeout_; } - float 
perUpstreamPrefetchRatio() const override { return per_upstream_prefetch_ratio_; } + float perUpstreamPreconnectRatio() const override { return per_upstream_preconnect_ratio_; } float peekaheadRatio() const override { return peekahead_ratio_; } uint32_t perConnectionBufferLimitBytes() const override { return per_connection_buffer_limit_bytes_; @@ -683,7 +683,7 @@ class ClusterInfoImpl : public ClusterInfo, protected Logger::Loggable idle_timeout_; - const float per_upstream_prefetch_ratio_; + const float per_upstream_preconnect_ratio_; const float peekahead_ratio_; const uint32_t per_connection_buffer_limit_bytes_; TransportSocketMatcherPtr socket_matcher_; diff --git a/source/docs/stats.md b/source/docs/stats.md index d6f82e80e445..fafb6265969b 100644 --- a/source/docs/stats.md +++ b/source/docs/stats.md @@ -269,3 +269,31 @@ Developers trying to can iterate through changes in these tests locally with: test/integration:stats_integration_test ``` +## Debugging Symbol Table Assertions + +If you are visiting this section because you saw a message like: + +```bash +[...][16][critical][assert] [source/common/stats/symbol_table_impl.cc:251] assert failure: +decode_search != decode_map_.end(). Details: Please see +https://github.com/envoyproxy/envoy/blob/master/source/docs/stats.md#debugging-symbol-table-asserts +``` +then you have come to the right place. + +In production, there is generally one `SymbolTable` per process, except in the 3 +or 4 places where IsolatedStoreImpl is deliberately instantiated. In those scenarios, +we don't expect names from these stores to be joined together. + +In tests, however, most of the Envoy mock structures do not allow any context to +be passed into constructors. So each mock structure instance that requires a +symbol table must instantiate its own. This is fine if they are truly isolated, +but in a number of scenarios, StatNames from different structures are joined +together during stat construction. 
Comingling of StatNames from different symbol +tables does not work, and the first evidence of this is usually an assertion on +the `decode_map_` lookup in SymbolTableImpl::incRefCount. + +To avoid this assertion, we must ensure that the symbols being combined all come +from the same symbol table. To facilitate this, a test-only global singleton can +be instantiated, via either `Stats::TestUtil::TestSymbolTable` or +`Stats::TestUtil::TestStore`. All such structures use a singleton symbol-table +whose lifetime is a single test method. This should resolve the assertion. diff --git a/source/extensions/clusters/aggregate/cluster.h b/source/extensions/clusters/aggregate/cluster.h index 72df0d685824..ab2207bc8299 100644 --- a/source/extensions/clusters/aggregate/cluster.h +++ b/source/extensions/clusters/aggregate/cluster.h @@ -78,7 +78,7 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer, // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; - // Prefetching not yet implemented for extensions. + // Preconnecting not yet implemented for extensions. Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } @@ -97,7 +97,7 @@ class AggregateClusterLoadBalancer : public Upstream::LoadBalancer, // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; - // Prefetching not yet implemented for extensions. + // Preconnecting not yet implemented for extensions. 
Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } diff --git a/source/extensions/clusters/dynamic_forward_proxy/cluster.h b/source/extensions/clusters/dynamic_forward_proxy/cluster.h index 32f82808570d..309da4ed4407 100644 --- a/source/extensions/clusters/dynamic_forward_proxy/cluster.h +++ b/source/extensions/clusters/dynamic_forward_proxy/cluster.h @@ -58,7 +58,7 @@ class Cluster : public Upstream::BaseDynamicClusterImpl, // Upstream::LoadBalancer Upstream::HostConstSharedPtr chooseHost(Upstream::LoadBalancerContext* context) override; - // Prefetching not implemented. + // Preconnecting not implemented. Upstream::HostConstSharedPtr peekAnotherHost(Upstream::LoadBalancerContext*) override { return nullptr; } diff --git a/source/extensions/common/wasm/wasm.cc b/source/extensions/common/wasm/wasm.cc index c3099c554e87..cc4efd963fdf 100644 --- a/source/extensions/common/wasm/wasm.cc +++ b/source/extensions/common/wasm/wasm.cc @@ -115,8 +115,7 @@ Wasm::Wasm(absl::string_view runtime, absl::string_view vm_id, absl::string_view Wasm::Wasm(WasmHandleSharedPtr base_wasm_handle, Event::Dispatcher& dispatcher) : WasmBase(base_wasm_handle, [&base_wasm_handle]() { - return createWasmVm( - getEnvoyWasmIntegration(*base_wasm_handle->wasm()->wasm_vm()).runtime()); + return createWasmVm(base_wasm_handle->wasm()->wasm_vm()->runtime()); }), scope_(getWasm(base_wasm_handle)->scope_), cluster_manager_(getWasm(base_wasm_handle)->clusterManager()), dispatcher_(dispatcher), diff --git a/source/extensions/common/wasm/wasm_extension.cc b/source/extensions/common/wasm/wasm_extension.cc index d37fcf79f95b..6c99a8124dc7 100644 --- a/source/extensions/common/wasm/wasm_extension.cc +++ b/source/extensions/common/wasm/wasm_extension.cc @@ -39,11 +39,6 @@ RegisterWasmExtension::RegisterWasmExtension(WasmExtension* extension) { wasm_extension = extension; } -std::unique_ptr -EnvoyWasm::createEnvoyWasmVmIntegration(absl::string_view runtime) 
{ - return std::make_unique(runtime); -} - PluginHandleExtensionFactory EnvoyWasm::pluginFactory() { return [](const WasmHandleSharedPtr& base_wasm, absl::string_view plugin_key) -> PluginHandleBaseSharedPtr { diff --git a/source/extensions/common/wasm/wasm_extension.h b/source/extensions/common/wasm/wasm_extension.h index 5842fd6227d9..8fb6b9b07f77 100644 --- a/source/extensions/common/wasm/wasm_extension.h +++ b/source/extensions/common/wasm/wasm_extension.h @@ -53,8 +53,6 @@ class WasmExtension : Logger::Loggable { virtual ~WasmExtension() = default; virtual void initialize() = 0; - virtual std::unique_ptr - createEnvoyWasmVmIntegration(absl::string_view runtime) = 0; virtual PluginHandleExtensionFactory pluginFactory() = 0; virtual WasmHandleExtensionFactory wasmFactory() = 0; virtual WasmHandleExtensionCloneFactory wasmCloneFactory() = 0; @@ -99,8 +97,6 @@ class EnvoyWasm : public WasmExtension { EnvoyWasm() = default; ~EnvoyWasm() override = default; void initialize() override {} - std::unique_ptr - createEnvoyWasmVmIntegration(absl::string_view runtime) override; PluginHandleExtensionFactory pluginFactory() override; WasmHandleExtensionFactory wasmFactory() override; WasmHandleExtensionCloneFactory wasmCloneFactory() override; diff --git a/source/extensions/common/wasm/wasm_vm.cc b/source/extensions/common/wasm/wasm_vm.cc index 56cfda3c8f5e..c8d3fb618e45 100644 --- a/source/extensions/common/wasm/wasm_vm.cc +++ b/source/extensions/common/wasm/wasm_vm.cc @@ -19,7 +19,25 @@ namespace Extensions { namespace Common { namespace Wasm { -void EnvoyWasmVmIntegration::error(absl::string_view message) { ENVOY_LOG(trace, message); } +proxy_wasm::LogLevel EnvoyWasmVmIntegration::getLogLevel() { + switch (ENVOY_LOGGER().level()) { + case spdlog::level::trace: + return proxy_wasm::LogLevel::trace; + case spdlog::level::debug: + return proxy_wasm::LogLevel::debug; + case spdlog::level::info: + return proxy_wasm::LogLevel::info; + case spdlog::level::warn: + return 
proxy_wasm::LogLevel::warn; + case spdlog::level::err: + return proxy_wasm::LogLevel::error; + default: + return proxy_wasm::LogLevel::critical; + } +} + +void EnvoyWasmVmIntegration::error(absl::string_view message) { ENVOY_LOG(error, message); } +void EnvoyWasmVmIntegration::trace(absl::string_view message) { ENVOY_LOG(trace, message); } bool EnvoyWasmVmIntegration::getNullVmFunction(absl::string_view function_name, bool returns_word, int number_of_arguments, @@ -72,7 +90,7 @@ WasmVmPtr createWasmVm(absl::string_view runtime) { } auto wasm = runtime_factory->createWasmVm(); - wasm->integration() = getWasmExtension()->createEnvoyWasmVmIntegration(runtime_factory->name()); + wasm->integration() = std::make_unique(); return wasm; } diff --git a/source/extensions/common/wasm/wasm_vm.h b/source/extensions/common/wasm/wasm_vm.h index fee77510a65d..668f8a60f36b 100644 --- a/source/extensions/common/wasm/wasm_vm.h +++ b/source/extensions/common/wasm/wasm_vm.h @@ -17,28 +17,19 @@ namespace Extensions { namespace Common { namespace Wasm { -// Wasm VM data providing stats. +// providing logger and NullVm function getter to Wasm VM. 
class EnvoyWasmVmIntegration : public proxy_wasm::WasmVmIntegration, Logger::Loggable { public: - EnvoyWasmVmIntegration(absl::string_view runtime) : runtime_(std::string(runtime)) {} - // proxy_wasm::WasmVmIntegration - proxy_wasm::WasmVmIntegration* clone() override { return new EnvoyWasmVmIntegration(runtime_); } + proxy_wasm::WasmVmIntegration* clone() override { return new EnvoyWasmVmIntegration(); } bool getNullVmFunction(absl::string_view function_name, bool returns_word, int number_of_arguments, proxy_wasm::NullPlugin* plugin, void* ptr_to_function_return) override; + proxy_wasm::LogLevel getLogLevel() override; void error(absl::string_view message) override; - - const std::string& runtime() const { return runtime_; } - -protected: - const std::string runtime_; -}; // namespace Wasm - -inline EnvoyWasmVmIntegration& getEnvoyWasmIntegration(proxy_wasm::WasmVm& wasm_vm) { - return *static_cast(wasm_vm.integration().get()); -} + void trace(absl::string_view message) override; +}; // Exceptions for issues with the WebAssembly code. 
class WasmException : public EnvoyException { diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index d1b3654457be..aa1396ec9302 100644 --- a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -212,12 +212,12 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he Http::Utility::Url state_url; if (!state_url.initialize(state, false)) { sendUnauthorizedResponse(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } // Avoid infinite redirect storm if (config_->redirectPathMatcher().match(state_url.pathAndQueryParams())) { sendUnauthorizedResponse(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } Http::ResponseHeaderMapPtr response_headers{ Http::createHeaderMap( @@ -283,7 +283,7 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he config_->stats().oauth_unauthorized_rq_.inc(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } // At this point, we *are* on /_oauth. 
We believe this request comes from the authorization @@ -292,14 +292,14 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he const auto query_parameters = Http::Utility::parseQueryString(path_str); if (query_parameters.find(queryParamsError()) != query_parameters.end()) { sendUnauthorizedResponse(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } // if the data we need is not present on the URL, stop execution if (query_parameters.find(queryParamsCode()) == query_parameters.end() || query_parameters.find(queryParamsState()) == query_parameters.end()) { sendUnauthorizedResponse(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } auth_code_ = query_parameters.at(queryParamsCode()); @@ -308,7 +308,7 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he Http::Utility::Url state_url; if (!state_url.initialize(state_, false)) { sendUnauthorizedResponse(); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } Formatter::FormatterImpl formatter(config_->redirectUri()); @@ -359,7 +359,7 @@ Http::FilterHeadersStatus OAuth2Filter::signOutUser(const Http::RequestHeaderMap response_headers->setLocation(new_path); decoder_callbacks_->encodeHeaders(std::move(response_headers), true, SIGN_OUT); - return Http::FilterHeadersStatus::StopAllIterationAndBuffer; + return Http::FilterHeadersStatus::StopIteration; } void OAuth2Filter::onGetAccessTokenSuccess(const std::string& access_code, diff --git a/source/extensions/tracers/common/ot/opentracing_driver_impl.h b/source/extensions/tracers/common/ot/opentracing_driver_impl.h index 2bfbddfe1886..ef10c1592667 100644 --- a/source/extensions/tracers/common/ot/opentracing_driver_impl.h +++ b/source/extensions/tracers/common/ot/opentracing_driver_impl.h @@ -5,6 +5,7 @@ #include 
"envoy/stats/scope.h" #include "envoy/tracing/http_tracer.h" +#include "common/common/empty_string.h" #include "common/common/logger.h" #include "common/singleton/const_singleton.h" @@ -43,6 +44,9 @@ class OpenTracingSpan : public Tracing::Span, Logger::Loggable { // X-Ray doesn't support baggage, so noop these OpenTracing functions. void setBaggage(absl::string_view, absl::string_view) override {} - std::string getBaggage(absl::string_view) override { return std::string(); } + std::string getBaggage(absl::string_view) override { return EMPTY_STRING; } + + // TODO: This method is unimplemented for X-Ray. + std::string getTraceIdAsHex() const override { return EMPTY_STRING; }; /** * Creates a child span. diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc index badecded9a15..d25f143cec47 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.cc @@ -2,6 +2,7 @@ #include "envoy/config/trace/v3/zipkin.pb.h" +#include "common/common/empty_string.h" #include "common/common/enum_to_int.h" #include "common/common/fmt.h" #include "common/common/utility.h" @@ -37,7 +38,7 @@ void ZipkinSpan::log(SystemTime timestamp, const std::string& event) { // TODO(#11622): Implement baggage storage for zipkin spans void ZipkinSpan::setBaggage(absl::string_view, absl::string_view) {} -std::string ZipkinSpan::getBaggage(absl::string_view) { return std::string(); } +std::string ZipkinSpan::getBaggage(absl::string_view) { return EMPTY_STRING; } void ZipkinSpan::injectContext(Http::RequestHeaderMap& request_headers) { // Set the trace-id and span-id headers properly, based on the newly-created span structure. 
diff --git a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h index 37c39b0adbb0..c5a234fd8bb1 100644 --- a/source/extensions/tracers/zipkin/zipkin_tracer_impl.h +++ b/source/extensions/tracers/zipkin/zipkin_tracer_impl.h @@ -8,6 +8,7 @@ #include "envoy/tracing/http_tracer.h" #include "envoy/upstream/cluster_manager.h" +#include "common/common/empty_string.h" #include "common/http/async_client_utility.h" #include "common/http/header_map_impl.h" #include "common/json/json_loader.h" @@ -80,6 +81,9 @@ class ZipkinSpan : public Tracing::Span { void setBaggage(absl::string_view, absl::string_view) override; std::string getBaggage(absl::string_view) override; + // TODO: This method is unimplemented for Zipkin. + std::string getTraceIdAsHex() const override { return EMPTY_STRING; }; + /** * @return a reference to the Zipkin::Span object. */ diff --git a/source/server/listener_hooks.h b/source/server/listener_hooks.h index 1b3de394ab13..1d88ab4760af 100644 --- a/source/server/listener_hooks.h +++ b/source/server/listener_hooks.h @@ -22,6 +22,11 @@ class ListenerHooks { */ virtual void onWorkerListenerRemoved() PURE; + /** + * Called when all workers have started. + */ + virtual void onWorkersStarted() PURE; + /** * Called when the Runtime::ScopedLoaderSingleton is created by the server. 
*/ @@ -36,6 +41,7 @@ class DefaultListenerHooks : public ListenerHooks { // ListenerHooks void onWorkerListenerAdded() override {} void onWorkerListenerRemoved() override {} + void onWorkersStarted() override {} void onRuntimeCreated() override {} }; diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index fb17e6810ed2..eb588456a9b9 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -884,7 +884,7 @@ bool ListenerManagerImpl::removeListenerInternal(const std::string& name, return true; } -void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { +void ListenerManagerImpl::startWorkers(GuardDog& guard_dog, std::function callback) { ENVOY_LOG(info, "all dependencies initialized. starting workers"); ASSERT(!workers_started_); workers_started_ = true; @@ -899,11 +899,13 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { ENVOY_LOG(debug, "starting worker {}", i); ASSERT(warming_listeners_.empty()); for (const auto& listener : active_listeners_) { - addListenerToWorker(*worker, absl::nullopt, *listener, [this, listeners_pending_init]() { - if (--(*listeners_pending_init) == 0) { - stats_.workers_started_.set(1); - } - }); + addListenerToWorker(*worker, absl::nullopt, *listener, + [this, listeners_pending_init, callback]() { + if (--(*listeners_pending_init) == 0) { + stats_.workers_started_.set(1); + callback(); + } + }); } worker->start(guard_dog); if (enable_dispatcher_stats_) { @@ -913,6 +915,7 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog) { } if (active_listeners_.empty()) { stats_.workers_started_.set(1); + callback(); } } diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index c29a0f8478ea..63bef43f0993 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -193,7 +193,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable callback) 
override; void stopListeners(StopListenersType stop_listeners_type) override; void stopWorkers() override; void beginListenerUpdate() override { error_state_tracker_.clear(); } diff --git a/source/server/server.cc b/source/server/server.cc index ef9e211c2e8f..fc41edc2ed08 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -87,7 +87,7 @@ InstanceImpl::InstanceImpl( : nullptr), grpc_context_(store.symbolTable()), http_context_(store.symbolTable()), router_context_(store.symbolTable()), process_context_(std::move(process_context)), - main_thread_id_(std::this_thread::get_id()), server_contexts_(*this) { + main_thread_id_(std::this_thread::get_id()), hooks_(hooks), server_contexts_(*this) { try { if (!options.logPath().empty()) { try { @@ -609,15 +609,22 @@ void InstanceImpl::onRuntimeReady() { } void InstanceImpl::startWorkers() { - listener_manager_->startWorkers(*worker_guard_dog_); - initialization_timer_->complete(); - // Update server stats as soon as initialization is done. - updateServerStats(); - workers_started_ = true; - // At this point we are ready to take traffic and all listening ports are up. Notify our parent - // if applicable that they can stop listening and drain. - restarter_.drainParentListeners(); - drain_manager_->startParentShutdownSequence(); + // The callback will be called after workers are started. + listener_manager_->startWorkers(*worker_guard_dog_, [this]() { + if (isShutdown()) { + return; + } + + initialization_timer_->complete(); + // Update server stats as soon as initialization is done. + updateServerStats(); + workers_started_ = true; + hooks_.onWorkersStarted(); + // At this point we are ready to take traffic and all listening ports are up. Notify our + // parent if applicable that they can stop listening and drain. 
+ restarter_.drainParentListeners(); + drain_manager_->startParentShutdownSequence(); + }); } Runtime::LoaderPtr InstanceUtil::createRuntime(Instance& server, diff --git a/source/server/server.h b/source/server/server.h index 47d6c6b0a18a..1b9147fc6eb7 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -365,6 +365,7 @@ class InstanceImpl final : Logger::Loggable, // initialization_time is a histogram for tracking the initialization time across hot restarts // whenever we have support for histogram merge across hot restarts. Stats::TimespanPtr initialization_timer_; + ListenerHooks& hooks_; ServerFactoryContextImpl server_contexts_; diff --git a/test/common/config/datasource_test.cc b/test/common/config/datasource_test.cc index 70860a7fefd2..8e2900b38c4a 100644 --- a/test/common/config/datasource_test.cc +++ b/test/common/config/datasource_test.cc @@ -105,6 +105,36 @@ TEST_F(AsyncDataSourceTest, LoadLocalDataSource) { EXPECT_EQ(async_data, "xxxxxx"); } +TEST_F(AsyncDataSourceTest, LoadLocalEmptyDataSource) { + AsyncDataSourcePb config; + + std::string yaml = R"EOF( + local: + inline_string: "" + )EOF"; + TestUtility::loadFromYamlAndValidate(yaml, config); + EXPECT_TRUE(config.has_local()); + + std::string async_data; + + EXPECT_CALL(init_manager_, add(_)).WillOnce(Invoke([this](const Init::Target& target) { + init_target_handle_ = target.createHandle("test"); + })); + + local_data_provider_ = std::make_unique( + init_manager_, config.local(), true, *api_, [&](const std::string& data) { + EXPECT_EQ(init_manager_.state(), Init::Manager::State::Initializing); + EXPECT_EQ(data, ""); + async_data = data; + }); + + EXPECT_CALL(init_manager_, state()).WillOnce(Return(Init::Manager::State::Initializing)); + EXPECT_CALL(init_watcher_, ready()); + + init_target_handle_->initialize(init_watcher_); + EXPECT_EQ(async_data, ""); +} + TEST_F(AsyncDataSourceTest, LoadRemoteDataSourceNoCluster) { AsyncDataSourcePb config; diff --git 
a/test/common/config/grpc_mux_impl_test.cc b/test/common/config/grpc_mux_impl_test.cc index 6544290bc7aa..f8ffa0329baf 100644 --- a/test/common/config/grpc_mux_impl_test.cc +++ b/test/common/config/grpc_mux_impl_test.cc @@ -159,18 +159,24 @@ TEST_F(GrpcMuxImplTest, ResetStream) { expectSendMessage("baz", {"z"}, ""); grpc_mux_->start(); + // Send another message for foo so that the node is cleared in the cached request. + // This is to test that the the node is set again in the first message below. + expectSendMessage("foo", {"z", "x", "y"}, ""); + auto foo_z_sub = grpc_mux_->addWatch("foo", {"z"}, callbacks_, resource_decoder_); + EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::ConnectionFailure, _)) - .Times(3); + .Times(4); EXPECT_CALL(random_, random()); EXPECT_CALL(*timer, enableTimer(_, _)); grpc_mux_->grpcStreamForTest().onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Canceled, ""); EXPECT_EQ(0, control_plane_connected_state_.value()); EXPECT_EQ(0, control_plane_pending_requests_.value()); EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); - expectSendMessage("foo", {"x", "y"}, "", true); + expectSendMessage("foo", {"z", "x", "y"}, "", true); expectSendMessage("bar", {}, ""); expectSendMessage("baz", {"z"}, ""); + expectSendMessage("foo", {"x", "y"}, ""); timer->invokeCallback(); expectSendMessage("baz", {}, ""); diff --git a/test/common/config/grpc_subscription_test_harness.h b/test/common/config/grpc_subscription_test_harness.h index f0950a8dca31..929f91619a10 100644 --- a/test/common/config/grpc_subscription_test_harness.h +++ b/test/common/config/grpc_subscription_test_harness.h @@ -44,7 +44,7 @@ class GrpcSubscriptionTestHarness : public SubscriptionTestHarness { "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints")), async_client_(new NiceMock()) { node_.set_id("fo0"); - EXPECT_CALL(local_info_, node()).WillOnce(testing::ReturnRef(node_)); + EXPECT_CALL(local_info_, 
node()).WillRepeatedly(testing::ReturnRef(node_)); ttl_timer_ = new NiceMock(&dispatcher_); timer_ = new Event::MockTimer(&dispatcher_); diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index c70632faa871..d9981c7ad7ba 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -90,9 +90,9 @@ class ConnPoolImplBaseTest : public testing::Test { std::vector clients_; }; -TEST_F(ConnPoolImplBaseTest, BasicPrefetch) { +TEST_F(ConnPoolImplBaseTest, BasicPreconnect) { // Create more than one connection per new stream. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); // On new stream, create 2 connections. CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); @@ -105,11 +105,11 @@ TEST_F(ConnPoolImplBaseTest, BasicPrefetch) { pool_.destructAllConnections(); } -TEST_F(ConnPoolImplBaseTest, PrefetchOnDisconnect) { +TEST_F(ConnPoolImplBaseTest, PreconnectOnDisconnect) { testing::InSequence s; // Create more than one connection per new stream. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); // On new stream, create 2 connections. EXPECT_CALL(pool_, instantiateActiveClient).Times(2); @@ -129,9 +129,9 @@ TEST_F(ConnPoolImplBaseTest, PrefetchOnDisconnect) { pool_.destructAllConnections(); } -TEST_F(ConnPoolImplBaseTest, NoPrefetchIfUnhealthy) { +TEST_F(ConnPoolImplBaseTest, NoPreconnectIfUnhealthy) { // Create more than one connection per new stream. 
- ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); host_->healthFlagSet(Upstream::Host::HealthFlag::FAILED_ACTIVE_HC); EXPECT_EQ(host_->health(), Upstream::Host::Health::Unhealthy); @@ -145,9 +145,9 @@ TEST_F(ConnPoolImplBaseTest, NoPrefetchIfUnhealthy) { pool_.destructAllConnections(); } -TEST_F(ConnPoolImplBaseTest, NoPrefetchIfDegraded) { +TEST_F(ConnPoolImplBaseTest, NoPreconnectIfDegraded) { // Create more than one connection per new stream. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); EXPECT_EQ(host_->health(), Upstream::Host::Health::Healthy); host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); @@ -161,34 +161,34 @@ TEST_F(ConnPoolImplBaseTest, NoPrefetchIfDegraded) { pool_.destructAllConnections(); } -TEST_F(ConnPoolImplBaseTest, ExplicitPrefetch) { +TEST_F(ConnPoolImplBaseTest, ExplicitPreconnect) { // Create more than one connection per new stream. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); EXPECT_CALL(pool_, instantiateActiveClient).Times(AnyNumber()); - // With global prefetch off, we won't prefetch. - EXPECT_FALSE(pool_.maybePrefetch(0)); + // With global preconnect off, we won't preconnect. + EXPECT_FALSE(pool_.maybePreconnect(0)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 0 /*connecting capacity*/); - // With prefetch ratio of 1.1, we'll prefetch two connections. - // Currently, no number of subsequent calls to prefetch will increase that. - EXPECT_TRUE(pool_.maybePrefetch(1.1)); - EXPECT_TRUE(pool_.maybePrefetch(1.1)); - EXPECT_FALSE(pool_.maybePrefetch(1.1)); + // With preconnect ratio of 1.1, we'll preconnect two connections. + // Currently, no number of subsequent calls to preconnect will increase that. 
+ EXPECT_TRUE(pool_.maybePreconnect(1.1)); + EXPECT_TRUE(pool_.maybePreconnect(1.1)); + EXPECT_FALSE(pool_.maybePreconnect(1.1)); CHECK_STATE(0 /*active*/, 0 /*pending*/, 2 /*connecting capacity*/); - // With a higher prefetch ratio, more connections may be prefetched. - EXPECT_TRUE(pool_.maybePrefetch(3)); + // With a higher preconnect ratio, more connections may be preconnected. + EXPECT_TRUE(pool_.maybePreconnect(3)); pool_.destructAllConnections(); } -TEST_F(ConnPoolImplBaseTest, ExplicitPrefetchNotHealthy) { +TEST_F(ConnPoolImplBaseTest, ExplicitPreconnectNotHealthy) { // Create more than one connection per new stream. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - // Prefetch won't occur if the host is not healthy. + // Preconnect won't occur if the host is not healthy. host_->healthFlagSet(Upstream::Host::HealthFlag::DEGRADED_EDS_HEALTH); - EXPECT_FALSE(pool_.maybePrefetch(1)); + EXPECT_FALSE(pool_.maybePreconnect(1)); } } // namespace ConnectionPool diff --git a/test/common/grpc/context_impl_test.cc b/test/common/grpc/context_impl_test.cc index 745ddb797f49..c7ccd3476803 100644 --- a/test/common/grpc/context_impl_test.cc +++ b/test/common/grpc/context_impl_test.cc @@ -18,7 +18,7 @@ namespace Grpc { TEST(GrpcContextTest, ChargeStats) { NiceMock cluster; - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::StatNamePool pool(*symbol_table_); const Stats::StatName service = pool.add("service"); const Stats::StatName method = pool.add("method"); @@ -68,7 +68,7 @@ TEST(GrpcContextTest, ResolveServiceAndMethod) { Http::TestRequestHeaderMapImpl headers; headers.setPath("/service_name/method_name?a=b"); const Http::HeaderEntry* path = headers.Path(); - Stats::TestSymbolTable symbol_table; + Stats::TestUtil::TestSymbolTable symbol_table; ContextImpl context(*symbol_table); absl::optional request_names = 
context.resolveDynamicServiceAndMethod(path); diff --git a/test/common/grpc/grpc_client_integration_test_harness.h b/test/common/grpc/grpc_client_integration_test_harness.h index 5c86df4d8b84..7a611435607c 100644 --- a/test/common/grpc/grpc_client_integration_test_harness.h +++ b/test/common/grpc/grpc_client_integration_test_harness.h @@ -441,7 +441,7 @@ class GrpcClientIntegrationTest : public GrpcClientIntegrationParamTest { FakeHttpConnectionPtr fake_connection_; std::vector fake_streams_; const Protobuf::MethodDescriptor* method_descriptor_; - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::IsolatedStoreImpl* stats_store_ = new Stats::IsolatedStoreImpl(*symbol_table_); Api::ApiPtr api_; Event::DispatcherPtr dispatcher_; diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index bcbcf4820489..b55876a3e340 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -44,7 +44,7 @@ class CodeUtilityTest : public testing::Test { code_stats_.chargeResponseStat(info); } - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::TestUtil::TestStore global_store_; Stats::TestUtil::TestStore cluster_scope_; Http::CodeStatsImpl code_stats_; diff --git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc index 8fa74b347d0d..76495244f508 100644 --- a/test/common/http/conn_manager_impl_test_2.cc +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -1453,7 +1453,7 @@ TEST_F(HttpConnectionManagerImplTest, FilterHeadReply) { .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, absl::nullopt, ""); - return FilterHeadersStatus::Continue; + return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); @@ -1483,7 +1483,7 @@ 
TEST_F(HttpConnectionManagerImplTest, ResetWithStoppedFilter) { .WillOnce(InvokeWithoutArgs([&]() -> FilterHeadersStatus { decoder_filters_[0]->callbacks_->sendLocalReply(Code::BadRequest, "Bad request", nullptr, absl::nullopt, ""); - return FilterHeadersStatus::Continue; + return FilterHeadersStatus::StopIteration; })); EXPECT_CALL(response_encoder_, streamErrorOnInvalidHttpMessage()).WillOnce(Return(true)); @@ -2356,7 +2356,7 @@ TEST_F(HttpConnectionManagerImplTest, TestSessionTrace) { object->dumpState(out); std::string state = out.str(); EXPECT_THAT(state, - testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): empty")); + testing::HasSubstr("filter_manager_callbacks_.requestHeaders(): null")); EXPECT_THAT(state, testing::HasSubstr("protocol_: 1")); return nullptr; })) diff --git a/test/common/http/filter_manager_test.cc b/test/common/http/filter_manager_test.cc index 08a34e7054f3..85d755e864cb 100644 --- a/test/common/http/filter_manager_test.cc +++ b/test/common/http/filter_manager_test.cc @@ -1,3 +1,4 @@ +#include "envoy/common/optref.h" #include "envoy/http/filter.h" #include "envoy/http/header_map.h" #include "envoy/matcher/matcher.h" @@ -65,7 +66,7 @@ TEST_F(FilterManagerTest, SendLocalReplyDuringDecodingGrpcClassiciation) { {"content-type", "application/grpc"}}}; ON_CALL(filter_manager_callbacks_, requestHeaders()) - .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers)))); + .WillByDefault(Return(makeOptRef(*grpc_headers))); EXPECT_CALL(filter_factory_, createFilterChain(_)) .WillRepeatedly(Invoke([&](FilterChainFactoryCallbacks& callbacks) -> void { @@ -127,7 +128,7 @@ TEST_F(FilterManagerTest, SendLocalReplyDuringEncodingGrpcClassiciation) { {"content-type", "application/grpc"}}}; ON_CALL(filter_manager_callbacks_, requestHeaders()) - .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers)))); + .WillByDefault(Return(makeOptRef(*grpc_headers))); filter_manager_->createFilterChain(); 
filter_manager_->requestHeadersInitialized(); @@ -186,7 +187,7 @@ TEST_F(FilterManagerTest, MatchTreeSkipActionDecodingHeaders) { {"content-type", "application/grpc"}}}; ON_CALL(filter_manager_callbacks_, requestHeaders()) - .WillByDefault(Return(absl::make_optional(std::ref(*grpc_headers)))); + .WillByDefault(Return(makeOptRef(*grpc_headers))); filter_manager_->createFilterChain(); filter_manager_->requestHeadersInitialized(); @@ -237,7 +238,7 @@ TEST_F(FilterManagerTest, MatchTreeSkipActionRequestAndResponseHeaders) { Buffer::OwnedImpl data("data"); ON_CALL(filter_manager_callbacks_, requestHeaders()) - .WillByDefault(Return(absl::make_optional(std::ref(*headers)))); + .WillByDefault(Return((makeOptRef(*headers)))); filter_manager_->createFilterChain(); EXPECT_CALL(filter_manager_callbacks_, encodeHeaders(_, _)); diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 9efa5bb181c7..cd31d3ca441c 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -1155,7 +1155,7 @@ class MockDestructSchedulableCallback : public Event::MockSchedulableCallback { public: MockDestructSchedulableCallback(Event::MockDispatcher* dispatcher) : Event::MockSchedulableCallback(dispatcher) {} - MOCK_METHOD0(Die, void()); + MOCK_METHOD(void, Die, ()); ~MockDestructSchedulableCallback() override { Die(); } }; diff --git a/test/common/http/http2/BUILD b/test/common/http/http2/BUILD index 8dca77e677b5..9c319e500896 100644 --- a/test/common/http/http2/BUILD +++ b/test/common/http/http2/BUILD @@ -4,6 +4,7 @@ load( "envoy_cc_test", "envoy_cc_test_library", "envoy_package", + "envoy_proto_library", ) licenses(["notice"]) # Apache 2 @@ -177,3 +178,22 @@ envoy_cc_fuzz_test( "//test/common/http/http2:codec_impl_test_util", ], ) + +envoy_proto_library( + name = "hpack_fuzz_proto", + srcs = ["hpack_fuzz.proto"], + deps = ["//test/fuzz:common_proto"], +) + +envoy_cc_fuzz_test( + name = "hpack_fuzz_test", + 
srcs = ["hpack_fuzz_test.cc"], + corpus = "hpack_corpus", + external_deps = [ + "nghttp2", + ], + deps = [ + ":hpack_fuzz_proto_cc_proto", + "//test/test_common:utility_lib", + ], +) diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index d8f41b263dd7..0315683878f2 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -1399,11 +1399,11 @@ TEST_F(Http2ConnPoolImplTest, DrainedConnectionsNotActive) { closeClient(0); } -TEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) { +TEST_F(Http2ConnPoolImplTest, PreconnectWithoutMultiplexing) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - // With one request per connection, and prefetch 1.5, the first request will + // With one request per connection, and preconnect 1.5, the first request will // kick off 2 connections. expectClientsCreate(2); ActiveTestRequest r1(*this, 0, false); @@ -1429,14 +1429,14 @@ TEST_F(Http2ConnPoolImplTest, PrefetchWithoutMultiplexing) { closeAllClients(); } -TEST_F(Http2ConnPoolImplTest, PrefetchOff) { +TEST_F(Http2ConnPoolImplTest, PreconnectOff) { TestScopedRuntime scoped_runtime; Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.reloadable_features.allow_prefetch", "false"}}); + {{"envoy.reloadable_features.allow_preconnect", "false"}}); cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - // Despite the prefetch ratio, no prefetch will happen due to the runtime + // Despite the preconnect ratio, no preconnect will happen due to the runtime // disable. 
expectClientsCreate(1); ActiveTestRequest r1(*this, 0, false); @@ -1447,11 +1447,11 @@ TEST_F(Http2ConnPoolImplTest, PrefetchOff) { closeAllClients(); } -TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { +TEST_F(Http2ConnPoolImplTest, PreconnectWithMultiplexing) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(2); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - // With two requests per connection, and prefetch 1.5, the first request will + // With two requests per connection, and preconnect 1.5, the first request will // only kick off 1 connection. expectClientsCreate(1); ActiveTestRequest r1(*this, 0, false); @@ -1470,11 +1470,11 @@ TEST_F(Http2ConnPoolImplTest, PrefetchWithMultiplexing) { closeAllClients(); } -TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { +TEST_F(Http2ConnPoolImplTest, PreconnectEvenWhenReady) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - // With one request per connection, and prefetch 1.5, the first request will + // With one request per connection, and preconnect 1.5, the first request will // kick off 2 connections. expectClientsCreate(2); ActiveTestRequest r1(*this, 0, false); @@ -1485,7 +1485,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { expectClientConnect(1); // The next incoming request will immediately be assigned a stream, and also - // kick off a prefetch. + // kick off a preconnect. 
expectClientsCreate(1); ActiveTestRequest r2(*this, 1, true); @@ -1496,9 +1496,9 @@ TEST_F(Http2ConnPoolImplTest, PrefetchEvenWhenReady) { closeAllClients(); } -TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { +TEST_F(Http2ConnPoolImplTest, PreconnectAfterTimeout) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); expectClientsCreate(2); ActiveTestRequest r1(*this, 0, false); @@ -1506,7 +1506,7 @@ TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { // When the first client connects, r1 will be assigned. expectClientConnect(0, r1); - // Now cause the prefetched connection to fail. We should try to create + // Now cause the preconnected connection to fail. We should try to create // another in its place. expectClientsCreate(1); test_clients_[1].connect_timer_->invokeCallback(); @@ -1517,20 +1517,20 @@ TEST_F(Http2ConnPoolImplTest, PrefetchAfterTimeout) { closeAllClients(); } -TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { +TEST_F(Http2ConnPoolImplTest, CloseExcessWithPreconnect) { cluster_->http2_options_.mutable_max_concurrent_streams()->set_value(1); - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.00)); + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.00)); - // First request prefetches an additional connection. + // First request preconnects an additional connection. expectClientsCreate(1); ActiveTestRequest r1(*this, 0, false); - // Second request does not prefetch. + // Second request does not preconnect. expectClientsCreate(1); ActiveTestRequest r2(*this, 0, false); - // Change the prefetch ratio to force the connection to no longer be excess. - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(2)); + // Change the preconnect ratio to force the connection to no longer be excess. 
+ ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(2)); // Closing off the second request should bring us back to 1 request in queue, // desired capacity 2, so will not close the connection. EXPECT_CALL(*this, onClientDestroy()).Times(0); @@ -1542,14 +1542,14 @@ TEST_F(Http2ConnPoolImplTest, CloseExcessWithPrefetch) { closeAllClients(); } -// Test that maybePrefetch is passed up to the base class implementation. -TEST_F(Http2ConnPoolImplTest, MaybePrefetch) { - ON_CALL(*cluster_, perUpstreamPrefetchRatio).WillByDefault(Return(1.5)); +// Test that maybePreconnect is passed up to the base class implementation. +TEST_F(Http2ConnPoolImplTest, MaybePreconnect) { + ON_CALL(*cluster_, perUpstreamPreconnectRatio).WillByDefault(Return(1.5)); - EXPECT_FALSE(pool_->maybePrefetch(0)); + EXPECT_FALSE(pool_->maybePreconnect(0)); expectClientsCreate(1); - EXPECT_TRUE(pool_->maybePrefetch(2)); + EXPECT_TRUE(pool_->maybePreconnect(2)); pool_->drainConnections(); closeAllClients(); diff --git a/test/common/http/http2/hpack_corpus/crash-52ef0a2d4d861941325ba57fde63d2aa700f43cf b/test/common/http/http2/hpack_corpus/crash-52ef0a2d4d861941325ba57fde63d2aa700f43cf new file mode 100644 index 000000000000..c64b2e0619af --- /dev/null +++ b/test/common/http/http2/hpack_corpus/crash-52ef0a2d4d861941325ba57fde63d2aa700f43cf @@ -0,0 +1,3 @@ +headers { +} +end_headers: true diff --git a/test/common/http/http2/hpack_corpus/example b/test/common/http/http2/hpack_corpus/example new file mode 100644 index 000000000000..4c0e0912324d --- /dev/null +++ b/test/common/http/http2/hpack_corpus/example @@ -0,0 +1,14 @@ +headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } +} \ No newline at end of file diff --git a/test/common/http/http2/hpack_corpus/example_many 
b/test/common/http/http2/hpack_corpus/example_many new file mode 100644 index 000000000000..efc059ff8e00 --- /dev/null +++ b/test/common/http/http2/hpack_corpus/example_many @@ -0,0 +1,30 @@ +headers { + headers { + key: ":method" + value: "GET" + } + headers { + key: ":path" + value: "/" + } + headers { + key: ":scheme" + value: "http" + } + headers { + key: ":host" + value: "foo" + } + headers { + key: "x-envoy" + value: "one_value" + } + headers { + key: "x-envoy" + value: "another" + } + headers { + key: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + value: "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } +} \ No newline at end of file diff --git a/test/common/http/http2/hpack_fuzz.proto b/test/common/http/http2/hpack_fuzz.proto new file mode 100644 index 000000000000..1e91c39ea596 --- /dev/null +++ b/test/common/http/http2/hpack_fuzz.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package test.common.http.http2; + +import "test/fuzz/common.proto"; + +import "validate/validate.proto"; + +// Structured input for hpack_fuzz_test. + +message HpackTestCase { + test.fuzz.Headers headers = 1 [(validate.rules).message.required = true]; + bool end_headers = 2; +} \ No newline at end of file diff --git a/test/common/http/http2/hpack_fuzz_test.cc b/test/common/http/http2/hpack_fuzz_test.cc new file mode 100644 index 000000000000..6cab23df21f4 --- /dev/null +++ b/test/common/http/http2/hpack_fuzz_test.cc @@ -0,0 +1,155 @@ +// Fuzzer for HPACK encoding and decoding. +// TODO(asraa): Speed up by using raw byte input and separators rather than protobuf input. 
+ +#include <algorithm> + +#include "test/common/http/http2/hpack_fuzz.pb.validate.h" +#include "test/fuzz/fuzz_runner.h" +#include "test/test_common/utility.h" + +#include "absl/container/fixed_array.h" +#include "nghttp2/nghttp2.h" + +namespace Envoy { +namespace Http { +namespace Http2 { +namespace { + +// Dynamic Header Table Size +constexpr int kHeaderTableSize = 4096; + +std::vector<nghttp2_nv> createNameValueArray(const test::fuzz::Headers& input) { + const size_t nvlen = input.headers().size(); + std::vector<nghttp2_nv> nva(nvlen); + int i = 0; + for (const auto& header : input.headers()) { + // TODO(asraa): Consider adding flags in fuzzed input. + const uint8_t flags = 0; + nva[i++] = {const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(header.key().data())), + const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(header.value().data())), + header.key().size(), header.value().size(), flags}; + } + + return nva; +} + +Buffer::OwnedImpl encodeHeaders(nghttp2_hd_deflater* deflater, + const std::vector<nghttp2_nv>& input_nv) { + // Estimate the upper bound + const size_t buflen = nghttp2_hd_deflate_bound(deflater, input_nv.data(), input_nv.size()); + + Buffer::RawSlice iovec; + Buffer::OwnedImpl payload; + payload.reserve(buflen, &iovec, 1); + ASSERT(iovec.len_ >= buflen); + + // Encode using nghttp2 + uint8_t* buf = reinterpret_cast<uint8_t*>(iovec.mem_); + ASSERT(input_nv.data() != nullptr); + const ssize_t result = + nghttp2_hd_deflate_hd(deflater, buf, buflen, input_nv.data(), input_nv.size()); + ASSERT(result >= 0, absl::StrCat("Failed to encode with result ", result)); + + iovec.len_ = result; + payload.commit(&iovec, 1); + + return payload; +} + +std::vector<nghttp2_nv> decodeHeaders(nghttp2_hd_inflater* inflater, + const Buffer::OwnedImpl& payload, bool end_headers) { + // Decode using nghttp2 + Buffer::RawSliceVector slices = payload.getRawSlices(); + const int num_slices = slices.size(); + ASSERT(num_slices == 1, absl::StrCat("number of slices ", num_slices)); + + std::vector<nghttp2_nv> decoded_headers; + int inflate_flags = 0; + nghttp2_nv decoded_nv; + while (slices[0].len_ > 
0) { + ssize_t result = nghttp2_hd_inflate_hd2(inflater, &decoded_nv, &inflate_flags, + reinterpret_cast<uint8_t*>(slices[0].mem_), + slices[0].len_, end_headers); + // Decoding should not fail and data should not be left in slice. + ASSERT(result >= 0); + + slices[0].mem_ = reinterpret_cast<uint8_t*>(slices[0].mem_) + result; + slices[0].len_ -= result; + + if (inflate_flags & NGHTTP2_HD_INFLATE_EMIT) { + // One header key value pair has been successfully decoded. + decoded_headers.push_back(decoded_nv); + } + } + + if (end_headers) { + nghttp2_hd_inflate_end_headers(inflater); + } + + return decoded_headers; +} + +struct NvComparator { + inline bool operator()(const nghttp2_nv& a, const nghttp2_nv& b) { + absl::string_view a_str(reinterpret_cast<const char*>(a.name), a.namelen); + absl::string_view b_str(reinterpret_cast<const char*>(b.name), b.namelen); + return a_str.compare(b_str) < 0; + } +}; + +DEFINE_PROTO_FUZZER(const test::common::http::http2::HpackTestCase& input) { + // Validate headers. + try { + TestUtility::validate(input); + } catch (const EnvoyException& e) { + ENVOY_LOG_MISC(trace, "EnvoyException: {}", e.what()); + return; + } + + // Create name value pairs from headers. + std::vector<nghttp2_nv> input_nv = createNameValueArray(input.headers()); + // Skip encoding empty headers. nghttp2 will throw a nullptr error on runtime if it receives a + // nullptr input. + if (!input_nv.data()) { + return; + } + + // Create Deflater and Inflater + nghttp2_hd_deflater* deflater = nullptr; + int rc = nghttp2_hd_deflate_new(&deflater, kHeaderTableSize); + ASSERT(rc == 0); + nghttp2_hd_inflater* inflater = nullptr; + rc = nghttp2_hd_inflate_new(&inflater); + ASSERT(rc == 0); + + // Encode headers with nghttp2. + const Buffer::OwnedImpl payload = encodeHeaders(deflater, input_nv); + ASSERT(!payload.getRawSlices().empty()); + + // Decode headers with nghttp2 + std::vector<nghttp2_nv> output_nv = decodeHeaders(inflater, payload, input.end_headers()); + + // Verify that decoded == encoded. 
+ ASSERT(input_nv.size() == output_nv.size()); + std::sort(input_nv.begin(), input_nv.end(), NvComparator()); + std::sort(output_nv.begin(), output_nv.end(), NvComparator()); + for (size_t i = 0; i < input_nv.size(); i++) { + absl::string_view in_name = {reinterpret_cast<const char*>(input_nv[i].name), input_nv[i].namelen}; + absl::string_view out_name = {reinterpret_cast<const char*>(output_nv[i].name), output_nv[i].namelen}; + absl::string_view in_val = {reinterpret_cast<const char*>(input_nv[i].value), input_nv[i].valuelen}; + absl::string_view out_val = {reinterpret_cast<const char*>(output_nv[i].value), + output_nv[i].valuelen}; + ASSERT(in_name == out_name); + ASSERT(in_val == out_val); + } + + // Delete inflater + nghttp2_hd_inflate_del(inflater); + // Delete deflater. + nghttp2_hd_deflate_del(deflater); +} + +} // namespace +} // namespace Http2 +} // namespace Http +} // namespace Envoy diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index d50e0866000f..9e4747e06047 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -318,7 +318,7 @@ most_specific_header_mutations_wins: {0} return fmt::format(yaml, most_specific_wins); } - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Api::ApiPtr api_; NiceMock factory_context_; Event::SimulatedTimeSystem test_time_; diff --git a/test/common/stats/stat_test_utility.h b/test/common/stats/stat_test_utility.h index 93ea031db43b..745f4900f217 100644 --- a/test/common/stats/stat_test_utility.h +++ b/test/common/stats/stat_test_utility.h @@ -13,6 +13,7 @@ namespace Envoy { namespace Stats { +namespace TestUtil { class TestSymbolTableHelper { public: @@ -28,9 +29,6 @@ class TestSymbolTableHelper { // are constructed without any context, but StatNames that are symbolized from // one mock may need to be entered into stat storage in another one. Thus they // must be connected by global state. 
-// -// TODO(jmarantz): rename this as Stats::TestUtil::GlobalSymbolTable to clarify -// the motivation, and rename the 10 call-sites for it. class TestSymbolTable { public: SymbolTable& operator*() { return global_.get().symbolTable(); } @@ -40,8 +38,6 @@ class TestSymbolTable { Envoy::Test::Global global_; }; -namespace TestUtil { - /** * Calls fn for a sampling of plausible stat names given a number of clusters. * This is intended for memory and performance benchmarking, where the syntax of diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index df1b5a40e0ef..7fc3408c2cca 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -24,11 +24,13 @@ #include "gtest/gtest.h" using testing::_; +using testing::AnyNumber; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; using testing::Property; using testing::Return; +using testing::StrictMock; namespace Envoy { namespace Tcp { @@ -64,7 +66,7 @@ struct ConnPoolCallbacks : public Tcp::ConnectionPool::Callbacks { pool_failure_.ready(); } - NiceMock callbacks_; + StrictMock callbacks_; ReadyWatcher pool_failure_; ReadyWatcher pool_ready_; ConnectionPool::ConnectionDataPtr conn_data_{}; @@ -110,12 +112,12 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { MOCK_METHOD(void, onConnReleasedForTest, ()); MOCK_METHOD(void, onConnDestroyedForTest, ()); - bool maybePrefetch(float ratio) override { + bool maybePreconnect(float ratio) override { if (!test_new_connection_pool_) { return false; } ASSERT(dynamic_cast(conn_pool_.get()) != nullptr); - return dynamic_cast(conn_pool_.get())->maybePrefetch(ratio); + return dynamic_cast(conn_pool_.get())->maybePreconnect(ratio); } struct TestConnection { @@ -310,7 +312,17 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, ~TcpConnPoolImplDestructorTest() override = default; void prepareConn() { - connection_ = new NiceMock(); + connection_ = new 
StrictMock(); + EXPECT_CALL(*connection_, setBufferLimits(0)); + EXPECT_CALL(*connection_, detectEarlyCloseWhenReadDisabled(false)); + EXPECT_CALL(*connection_, addConnectionCallbacks(_)); + EXPECT_CALL(*connection_, addReadFilter(_)); + EXPECT_CALL(*connection_, connect()); + EXPECT_CALL(*connection_, setConnectionStats(_)); + EXPECT_CALL(*connection_, noDelay(true)); + EXPECT_CALL(*connection_, streamInfo()).Times(2); + EXPECT_CALL(*connection_, id()).Times(AnyNumber()); + connect_timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(dispatcher_, createClientConnection_(_, _, _, _)).WillOnce(Return(connection_)); EXPECT_CALL(*connect_timer_, enableTimer(_, _)); @@ -334,7 +346,7 @@ class TcpConnPoolImplDestructorTest : public Event::TestUsingSimulatedTime, std::shared_ptr cluster_{new NiceMock()}; NiceMock* upstream_ready_cb_; NiceMock* connect_timer_; - NiceMock* connection_; + Network::MockClientConnection* connection_; std::unique_ptr conn_pool_; std::unique_ptr callbacks_; }; @@ -751,6 +763,7 @@ TEST_P(TcpConnPoolImplTest, DisconnectWhileBound) { EXPECT_CALL(callbacks.pool_ready_, ready()); + EXPECT_CALL(callbacks.callbacks_, onEvent(_)); conn_pool_->test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Kill the connection while it has an active request. @@ -774,6 +787,7 @@ TEST_P(TcpConnPoolImplTest, DisconnectWhilePending) { EXPECT_CALL(*conn_pool_->test_conns_[0].connect_timer_, disableTimer()); EXPECT_CALL(callbacks.pool_ready_, ready()); + EXPECT_CALL(callbacks.callbacks_, onEvent(_)); conn_pool_->test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::Connected); // Second request pending. @@ -1092,16 +1106,16 @@ TEST_P(TcpConnPoolImplTest, RequestCapacity) { conn_pool_->test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); } -// Test that maybePrefetch is passed up to the base class implementation. 
-TEST_P(TcpConnPoolImplTest, TestPrefetch) { +// Test that maybePreconnect is passed up to the base class implementation. +TEST_P(TcpConnPoolImplTest, TestPreconnect) { initialize(); if (!test_new_connection_pool_) { return; } - EXPECT_FALSE(conn_pool_->maybePrefetch(0)); + EXPECT_FALSE(conn_pool_->maybePreconnect(0)); conn_pool_->expectConnCreate(); - ASSERT_TRUE(conn_pool_->maybePrefetch(2)); + ASSERT_TRUE(conn_pool_->maybePreconnect(2)); conn_pool_->test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); } @@ -1131,6 +1145,7 @@ TEST_P(TcpConnPoolImplDestructorTest, TestPendingConnectionsAreClosed) { TEST_P(TcpConnPoolImplDestructorTest, TestBusyConnectionsAreClosed) { prepareConn(); + EXPECT_CALL(callbacks_->callbacks_, onEvent(_)); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); EXPECT_CALL(dispatcher_, clearDeferredDeleteList()); conn_pool_.reset(); diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index ae38d8c8679e..ce39a318e205 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -746,6 +746,7 @@ TEST(HttpNullTracerTest, BasicFunctionality) { span_ptr->setTag("foo", "bar"); span_ptr->setBaggage("key", "value"); ASSERT_EQ("", span_ptr->getBaggage("baggage_key")); + ASSERT_EQ(span_ptr->getTraceIdAsHex(), ""); span_ptr->injectContext(request_headers); EXPECT_NE(nullptr, span_ptr->spawnChild(config, "foo", SystemTime())); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index bd96f50e0c1e..df0cfd1aba77 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -4179,7 +4179,7 @@ TEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) { ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, &lb_context)); } -class PrefetchTest : public 
ClusterManagerImplTest { +class PreconnectTest : public ClusterManagerImplTest { public: void initialize(float ratio) { const std::string yaml = R"EOF( @@ -4197,8 +4197,8 @@ class PrefetchTest : public ClusterManagerImplTest { if (ratio != 0) { config.mutable_static_resources() ->mutable_clusters(0) - ->mutable_prefetch_policy() - ->mutable_predictive_prefetch_ratio() + ->mutable_preconnect_policy() + ->mutable_predictive_preconnect_ratio() ->set_value(ratio); } create(config); @@ -4231,8 +4231,8 @@ class PrefetchTest : public ClusterManagerImplTest { HostSharedPtr host2_; }; -TEST_F(PrefetchTest, PrefetchOff) { - // With prefetch set to 0, each request for a connection pool will only +TEST_F(PreconnectTest, PreconnectOff) { + // With preconnect set to 0, each request for a connection pool will only // allocate that conn pool. initialize(0); EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _)) @@ -4248,9 +4248,9 @@ TEST_F(PrefetchTest, PrefetchOff) { ->tcpConnPool(ResourcePriority::Default, nullptr); } -TEST_F(PrefetchTest, PrefetchOn) { - // With prefetch set to 1.1, each request for a connection pool will kick off - // prefetching, so create the pool for both the current connection and the +TEST_F(PreconnectTest, PreconnectOn) { + // With preconnect set to 1.1, each request for a connection pool will kick off + // preconnecting, so create the pool for both the current connection and the // anticipated one. 
initialize(1.1); EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _)) diff --git a/test/common/upstream/least_request_load_balancer_corpus/least_request-high-number-of-hosts b/test/common/upstream/least_request_load_balancer_corpus/least_request-high-number-of-hosts index 3c3f884753fa..61ca01c525d7 100644 --- a/test/common/upstream/least_request_load_balancer_corpus/least_request-high-number-of-hosts +++ b/test/common/upstream/least_request_load_balancer_corpus/least_request-high-number-of-hosts @@ -16,12 +16,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/least_request_load_balancer_corpus/least_request-no-config b/test/common/upstream/least_request_load_balancer_corpus/least_request-no-config index 470a160224c8..8a09904a464e 100644 --- a/test/common/upstream/least_request_load_balancer_corpus/least_request-no-config +++ b/test/common/upstream/least_request_load_balancer_corpus/least_request-no-config @@ -12,12 +12,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/least_request_load_balancer_corpus/least_request-no-hosts b/test/common/upstream/least_request_load_balancer_corpus/least_request-no-hosts index f27f2002d8a7..99d2ac95b61f 100644 --- a/test/common/upstream/least_request_load_balancer_corpus/least_request-no-hosts +++ b/test/common/upstream/least_request_load_balancer_corpus/least_request-no-hosts @@ -4,7 +4,7 @@ common_lb_config { } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/least_request_load_balancer_corpus/least_request-normal b/test/common/upstream/least_request_load_balancer_corpus/least_request-normal index 75417693b31b..a7adc33fbe17 100644 --- a/test/common/upstream/least_request_load_balancer_corpus/least_request-normal +++ b/test/common/upstream/least_request_load_balancer_corpus/least_request-normal @@ -12,12 +12,12 @@ actions { } } 
actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/least_request_load_balancer_corpus/least_request-with-locality-high-number-of-hosts b/test/common/upstream/least_request_load_balancer_corpus/least_request-with-locality-high-number-of-hosts index 99ab1d0edc0e..b0a6871b861e 100644 --- a/test/common/upstream/least_request_load_balancer_corpus/least_request-with-locality-high-number-of-hosts +++ b/test/common/upstream/least_request_load_balancer_corpus/least_request-with-locality-high-number-of-hosts @@ -16,12 +16,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/load_balancer_fuzz.proto b/test/common/upstream/load_balancer_fuzz.proto index c152adc21248..a312636078a3 100644 --- a/test/common/upstream/load_balancer_fuzz.proto +++ b/test/common/upstream/load_balancer_fuzz.proto @@ -27,8 +27,8 @@ message LbAction { // This updates the health flags of hosts at a certain priority level. The number of hosts in each priority level/in localities is static, // as untrusted upstreams cannot change that, and can only change their health flags. UpdateHealthFlags update_health_flags = 1; - // Prefetches a host using the encapsulated specific load balancer. - google.protobuf.Empty prefetch = 2; + // Preconnects a host using the encapsulated specific load balancer. + google.protobuf.Empty preconnect = 2; // Chooses a host using the encapsulated specific load balancer. 
google.protobuf.Empty choose_host = 3; } diff --git a/test/common/upstream/load_balancer_fuzz_base.cc b/test/common/upstream/load_balancer_fuzz_base.cc index 96f068675044..b281af6cd992 100644 --- a/test/common/upstream/load_balancer_fuzz_base.cc +++ b/test/common/upstream/load_balancer_fuzz_base.cc @@ -214,7 +214,7 @@ void LoadBalancerFuzzBase::updateHealthFlagsForAHostSet( host_set.runCallbacks({}, {}); } -void LoadBalancerFuzzBase::prefetch() { +void LoadBalancerFuzzBase::preconnect() { // TODO: context, could generate it in proto action lb_->peekAnotherHost(nullptr); } @@ -239,8 +239,8 @@ void LoadBalancerFuzzBase::replay( event.update_health_flags().random_bytestring()); break; } - case test::common::upstream::LbAction::kPrefetch: - prefetch(); + case test::common::upstream::LbAction::kPreconnect: + preconnect(); break; case test::common::upstream::LbAction::kChooseHost: chooseHost(); diff --git a/test/common/upstream/load_balancer_fuzz_base.h b/test/common/upstream/load_balancer_fuzz_base.h index bc069cca8491..1590707dd7cc 100644 --- a/test/common/upstream/load_balancer_fuzz_base.h +++ b/test/common/upstream/load_balancer_fuzz_base.h @@ -33,7 +33,7 @@ class LoadBalancerFuzzBase { // balancer needs to run its algorithm is already encapsulated within the load balancer. Thus, // once the load balancer is constructed, all this class has to do is call lb_->peekAnotherHost() // and lb_->chooseHost(). 
- void prefetch(); + void preconnect(); void chooseHost(); void replay(const Protobuf::RepeatedPtrField& actions); diff --git a/test/common/upstream/outlier_detection_impl_test.cc b/test/common/upstream/outlier_detection_impl_test.cc index b71ed9e3481a..db54d1d4fb99 100644 --- a/test/common/upstream/outlier_detection_impl_test.cc +++ b/test/common/upstream/outlier_detection_impl_test.cc @@ -69,15 +69,13 @@ class OutlierDetectorImplTest : public Event::TestUsingSimulatedTime, public tes OutlierDetectorImplTest() : outlier_detection_ejections_active_(cluster_.info_->stats_store_.gauge( "outlier_detection.ejections_active", Stats::Gauge::ImportMode::Accumulate)) { - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(true)); - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_success_rate", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingSuccessRateRuntime, 100)) .WillByDefault(Return(true)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_local_origin_failure_", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveLocalOriginFailureRuntime, 100)) .WillByDefault(Return(true)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_local_origin_success_rate", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingLocalOriginSuccessRateRuntime, 100)) .WillByDefault(Return(true)); // Prepare separate config with split_external_local_origin_errors set to true. 
@@ -138,6 +136,7 @@ success_rate_stdev_factor: 3000 failure_percentage_minimum_hosts: 10 failure_percentage_request_volume: 25 failure_percentage_threshold: 70 +max_ejection_time: 400s )EOF"; envoy::config::cluster::v3::OutlierDetection outlier_detection; @@ -162,11 +161,39 @@ failure_percentage_threshold: 70 EXPECT_EQ(10UL, detector->config().failurePercentageMinimumHosts()); EXPECT_EQ(25UL, detector->config().failurePercentageRequestVolume()); EXPECT_EQ(70UL, detector->config().failurePercentageThreshold()); + EXPECT_EQ(400000UL, detector->config().maxEjectionTimeMs()); +} + +// Test verifies that invalid outlier detector's config is rejected. +TEST_F(OutlierDetectorImplTest, DetectorStaticConfigInvalidMaxEjectTime) { + // Create invalid config. max_ejection_time must not be smaller than base_ejection_time. + const std::string yaml = R"EOF( +interval: 0.1s +base_ejection_time: 10s +consecutive_5xx: 10 +max_ejection_percent: 50 +enforcing_consecutive_5xx: 10 +enforcing_success_rate: 20 +success_rate_minimum_hosts: 50 +success_rate_request_volume: 200 +success_rate_stdev_factor: 3000 +failure_percentage_minimum_hosts: 10 +failure_percentage_request_volume: 25 +failure_percentage_threshold: 70 +max_ejection_time: 3s + )EOF"; + + envoy::config::cluster::v3::OutlierDetection outlier_detection; + TestUtility::loadFromYaml(yaml, outlier_detection); + dispatcher_.createTimer([]() -> void {}); + // Detector should reject the config. 
+ ASSERT_THROW(DetectorImpl::create(cluster_, outlier_detection, dispatcher_, runtime_, + time_system_, event_logger_), + EnvoyException); } TEST_F(OutlierDetectorImplTest, DestroyWithActive) { - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.max_ejection_percent", _)) - .WillByDefault(Return(100)); + ON_CALL(runtime_.snapshot_, getInteger(MaxEjectionPercentRuntime, _)).WillByDefault(Return(100)); EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); addHosts({"tcp://127.0.0.1:80"}, true); addHosts({"tcp://127.0.0.1:81"}, false); @@ -418,10 +445,9 @@ TEST_F(OutlierDetectorImplTest, BasicFlowGatewayFailure) { std::shared_ptr detector(DetectorImpl::create( cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_gateway_failure", 0)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, 0)) .WillByDefault(Return(true)); - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(false)); detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); @@ -521,8 +547,8 @@ TEST_F(OutlierDetectorImplTest, TimeoutWithHttpCode) { EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); // Get the configured number of failures and simulate than number of connect failures. 
- uint32_t n = runtime_.snapshot_.getInteger("outlier_detection.consecutive_5xx", - detector->config().consecutive5xx()); + uint32_t n = + runtime_.snapshot_.getInteger(Consecutive5xxRuntime, detector->config().consecutive5xx()); while (n--) { hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout, absl::optional(500)); @@ -544,8 +570,7 @@ TEST_F(OutlierDetectorImplTest, TimeoutWithHttpCode) { _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)) .Times(0); // Get the configured number of failures and simulate than number of connect failures. - n = runtime_.snapshot_.getInteger("outlier_detection.consecutive_5xx", - detector->config().consecutive5xx()); + n = runtime_.snapshot_.getInteger(Consecutive5xxRuntime, detector->config().consecutive5xx()); while (n--) { hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout, absl::optional(200)); @@ -561,7 +586,7 @@ TEST_F(OutlierDetectorImplTest, TimeoutWithHttpCode) { logEject(std::static_pointer_cast(hosts_[0]), _, envoy::data::cluster::v2alpha::CONSECUTIVE_GATEWAY_FAILURE, false)); // Get the configured number of failures and simulate than number of connect failures. 
- n = runtime_.snapshot_.getInteger("outlier_detection.consecutive_gateway_failure", + n = runtime_.snapshot_.getInteger(ConsecutiveGatewayFailureRuntime, detector->config().consecutiveGatewayFailure()); while (n--) { hosts_[0]->outlierDetector().putResult(Result::LocalOriginTimeout); @@ -579,8 +604,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowLocalOriginFailure) { std::shared_ptr detector(DetectorImpl::create( cluster_, outlier_detection_split_, dispatcher_, runtime_, time_system_, event_logger_)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_local_origin_failure", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveLocalOriginFailureRuntime, 100)) .WillByDefault(Return(true)); detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); @@ -592,7 +616,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowLocalOriginFailure) { time_system_.setMonotonicTime(std::chrono::milliseconds(0)); // Get the configured number of failures and simulate than number of connect failures. - uint32_t n = runtime_.snapshot_.getInteger("outlier_detection.consecutive_local_origin_failure", + uint32_t n = runtime_.snapshot_.getInteger(ConsecutiveLocalOriginFailureRuntime, detector->config().consecutiveLocalOriginFailure()); while (n--) { hosts_[0]->outlierDetector().putResult(Result::LocalOriginConnectFailed); @@ -619,7 +643,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowLocalOriginFailure) { // Simulate few connect failures, not enough for ejection and then simulate connect success // and again few failures not enough for ejection. - n = runtime_.snapshot_.getInteger("outlier_detection.consecutive_local_origin_failure", + n = runtime_.snapshot_.getInteger(ConsecutiveLocalOriginFailureRuntime, detector->config().consecutiveLocalOriginFailure()); n--; // make sure that this is not enough for ejection. 
while (n--) { @@ -661,8 +685,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowGatewayFailureAnd5xx) { std::shared_ptr detector(DetectorImpl::create( cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_gateway_failure", 0)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, 0)) .WillByDefault(Return(true)); detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); @@ -754,10 +777,9 @@ TEST_F(OutlierDetectorImplTest, BasicFlowNonHttpCodesExternalOrigin) { addHosts({"tcp://127.0.0.1:81"}); cluster_.prioritySet().getMockHostSet(0)->runCallbacks({hosts_[1]}, {}); - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(true)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_gateway_failure", 0)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, 0)) .WillByDefault(Return(false)); // Make sure that EXT_ORIGIN_REQUEST_SUCCESS cancels LOCAL_ORIGIN_CONNECT_FAILED @@ -804,10 +826,9 @@ TEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateExternalOrigin) { detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); // Turn off 5xx detection to test SR detection in isolation. 
- ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(false)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_gateway_failure", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, 100)) .WillByDefault(Return(false)); // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter // gets saturated (every 5 times). @@ -828,7 +849,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateExternalOrigin) { EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[4]), _, envoy::data::cluster::v2alpha::SUCCESS_RATE, true)); EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.success_rate_stdev_factor", 1900)) + ON_CALL(runtime_.snapshot_, getInteger(SuccessRateStdevFactorRuntime, 1900)) .WillByDefault(Return(1900)); interval_timer_->invokeCallback(); EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate( @@ -935,8 +956,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateLocalOrigin) { detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); // Turn off detecting consecutive local origin failures. - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_local_origin_failure", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveLocalOriginFailureRuntime, 100)) .WillByDefault(Return(false)); // Expect non-enforcing logging to happen every time the consecutive_ counter // gets saturated (every 5 times). 
@@ -954,7 +974,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowSuccessRateLocalOrigin) { logEject(std::static_pointer_cast(hosts_[4]), _, envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN, true)); EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.success_rate_stdev_factor", 1900)) + ON_CALL(runtime_.snapshot_, getInteger(SuccessRateStdevFactorRuntime, 1900)) .WillByDefault(Return(1900)); interval_timer_->invokeCallback(); EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate( @@ -1025,7 +1045,7 @@ TEST_F(OutlierDetectorImplTest, EmptySuccessRate) { time_system_.setMonotonicTime(std::chrono::milliseconds(10000)); EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.success_rate_minimum_hosts", 5)) + ON_CALL(runtime_.snapshot_, getInteger(SuccessRateMinimumHostsRuntime, 5)) .WillByDefault(Return(0)); interval_timer_->invokeCallback(); } @@ -1046,15 +1066,14 @@ TEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageExternalOrigin) { detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); // Turn off 5xx detection and SR detection to test failure percentage detection in isolation. 
- ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(false)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_gateway_failure", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveGatewayFailureRuntime, 100)) .WillByDefault(Return(false)); - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_success_rate", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingSuccessRateRuntime, 100)) .WillByDefault(Return(false)); // Now turn on failure percentage detection. - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_failure_percentage", 0)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingFailurePercentageRuntime, 0)) .WillByDefault(Return(true)); // Expect non-enforcing logging to happen every time the consecutive_5xx_ counter // gets saturated (every 5 times). @@ -1085,7 +1104,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageExternalOrigin) { EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[4]), _, envoy::data::cluster::v2alpha::FAILURE_PERCENTAGE, true)); EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.success_rate_stdev_factor", 1900)) + ON_CALL(runtime_.snapshot_, getInteger(SuccessRateStdevFactorRuntime, 1900)) .WillByDefault(Return(1900)); interval_timer_->invokeCallback(); EXPECT_FLOAT_EQ(100.0 * (50.0 / 300.0), @@ -1166,15 +1185,12 @@ TEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageLocalOrigin) { detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); // Turn off 5xx detection and SR detection to test failure percentage detection in isolation. 
- ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_consecutive_local_origin_failure", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutiveLocalOriginFailureRuntime, 100)) .WillByDefault(Return(false)); - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_local_origin_success_rate", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingLocalOriginSuccessRateRuntime, 100)) .WillByDefault(Return(false)); // Now turn on failure percentage detection. - ON_CALL(runtime_.snapshot_, - featureEnabled("outlier_detection.enforcing_failure_percentage_local_origin", 0)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingFailurePercentageLocalOriginRuntime, 0)) .WillByDefault(Return(true)); // Expect non-enforcing logging to happen every time the consecutive_ counter // gets saturated (every 5 times). @@ -1196,7 +1212,7 @@ TEST_F(OutlierDetectorImplTest, BasicFlowFailurePercentageLocalOrigin) { logEject(std::static_pointer_cast(hosts_[4]), _, envoy::data::cluster::v2alpha::SUCCESS_RATE_LOCAL_ORIGIN, false)); EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.failure_percentage_threshold", 85)) + ON_CALL(runtime_.snapshot_, getInteger(FailurePercentageThresholdRuntime, 85)) .WillByDefault(Return(40)); interval_timer_->invokeCallback(); EXPECT_EQ(50, hosts_[4]->outlierDetector().successRate( @@ -1294,8 +1310,7 @@ TEST_F(OutlierDetectorImplTest, Overflow) { cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.max_ejection_percent", _)) - .WillByDefault(Return(1)); + ON_CALL(runtime_.snapshot_, getInteger(MaxEjectionPercentRuntime, _)).WillByDefault(Return(1)); loadRq(hosts_[0], 4, 500); @@ -1324,7 +1339,7 @@ 
TEST_F(OutlierDetectorImplTest, NotEnforcing) { loadRq(hosts_[0], 4, 503); - ON_CALL(runtime_.snapshot_, featureEnabled("outlier_detection.enforcing_consecutive_5xx", 100)) + ON_CALL(runtime_.snapshot_, featureEnabled(EnforcingConsecutive5xxRuntime, 100)) .WillByDefault(Return(false)); EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, false)); @@ -1364,8 +1379,7 @@ TEST_F(OutlierDetectorImplTest, EjectionActiveValueIsAccountedWithoutMetricStora cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); - ON_CALL(runtime_.snapshot_, getInteger("outlier_detection.max_ejection_percent", _)) - .WillByDefault(Return(1)); + ON_CALL(runtime_.snapshot_, getInteger(MaxEjectionPercentRuntime, _)).WillByDefault(Return(1)); loadRq(hosts_[0], 4, 500); @@ -1489,6 +1503,359 @@ TEST_F(OutlierDetectorImplTest, Consecutive_5xxAlreadyEjected) { loadRq(hosts_[0], 5, 500); } +// Test verifies that ejection time increases each time the node is ejected, +// and decreases when node stays healthy. +// The test outline is as follows: +// - eject the node for the first time. It should be brought back in 10 secs +// - eject the node the second time. It should be brought back in 20 secs +// - eject the node the third time. It should be brought back in 30 secs +// - for the next two intervals the node is healthy, which should +// bring ejection time down. +// - eject the node again. It should be brought back in 20 secs. +// - simulate long period of time when the node is healthy. +// - eject the node again. It should be brought back after 10 secs. +TEST_F(OutlierDetectorImplTest, EjectTimeBackoff) { + // Setup base ejection time to 10 secs. 
+ ON_CALL(runtime_.snapshot_, getInteger(BaseEjectionTimeMsRuntime, _)) + .WillByDefault(Return(10000UL)); + EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); + addHosts({"tcp://127.0.0.1:80"}); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + std::shared_ptr detector(DetectorImpl::create( + cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); + detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); + + // Eject the node by consecutive 5xx errors. + time_system_.setMonotonicTime(std::chrono::seconds(0)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Ejection base time is 10 secs. The node has been ejected just once. + // It should be brought back after 10 secs. + time_system_.setMonotonicTime(std::chrono::seconds(10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + + // Cause ejection again. + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. 
+ EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // This is the second ejection in the row. + // Node should stay ejected for twice the base_ejection_time: 20 secs. + time_system_.setMonotonicTime(std::chrono::seconds(20)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + // Make sure that node stays ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + time_system_.setMonotonicTime(std::chrono::seconds(30)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + + // Third ejection in the row. It starts at 30 secs. The node should be ejected for 3*10 secs. + // It should not be brought back until 60 secs from the start of the test. + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Node should stay ejected after 10 secs of ejection time. 
+ time_system_.setMonotonicTime(std::chrono::seconds(40)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Node should stay ejected after 20 secs of ejection time. + time_system_.setMonotonicTime(std::chrono::seconds(50)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Node should be brought back after being ejected for 30 secs. + time_system_.setMonotonicTime(std::chrono::seconds(60)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + + // During the next 2 timer intervals, the node is healthy. This should decrease + // the eject time backoff. + time_system_.setMonotonicTime(std::chrono::seconds(70)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + + time_system_.setMonotonicTime(std::chrono::seconds(80)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + + // Trigger the next ejection. The node should be ejected for 20 secs. 
+ EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Node should stay ejected after 10 secs. + time_system_.setMonotonicTime(std::chrono::seconds(90)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Node should be brought back after being ejected for 20 secs. + time_system_.setMonotonicTime(std::chrono::seconds(100)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + + // Now simulate long period of no errors. + // The node will not be ejected and the eject backoff time should + // drop to the initial value of 1 * base_ejection_time. + for (auto i = 1; i <= 50; i++) { + time_system_.setMonotonicTime(std::chrono::seconds(100 + i * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + } + + // Trigger ejection. 
+ time_system_.setMonotonicTime(std::chrono::seconds(610)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // The node should be brought back after 10 secs. + time_system_.setMonotonicTime(std::chrono::seconds(620)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); +} + +// Test that ejection time does not increase beyond maximum. +// Test outline: +// - max_ejection_time is 30 times longer than base_ejection_time. +// - simulate 30 ejections. Each time the node is ejected, the ejection time is +// longer. The last ejection time is equal to max_ejection_time. +// - eject node again. Ejection time should not increase beyond max_ejection_time. +TEST_F(OutlierDetectorImplTest, MaxEjectTime) { + // Setup base ejection time to 10 secs. + ON_CALL(runtime_.snapshot_, getInteger(BaseEjectionTimeMsRuntime, _)) + .WillByDefault(Return(10000UL)); + // Setup max ejection time to 300 secs.
+ ON_CALL(runtime_.snapshot_, getInteger(MaxEjectionTimeMsRuntime, _)) + .WillByDefault(Return(300000UL)); + EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); + addHosts({"tcp://127.0.0.1:80"}); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + std::shared_ptr detector(DetectorImpl::create( + cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); + detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); + + // Verify that maximum_ejection_time caps ejection time. + // Base ejection time is 10s. Max ejection time is 300s. + // It will take 30 ejections to reach the maximum. Beyond that, ejection time should stay + // the same. + uint32_t eject_tick = 0; + time_system_.setMonotonicTime(std::chrono::seconds(0)); + // Trigger 30 ejections. + // For each ejection, time to uneject increases. + for (auto i = 1; i <= 30; i++) { + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Simulate several check intervals. For each check the node should stay ejected. + for (auto j = 1; j < i; j++) { + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + } + + // Wait for unejection.
+ time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + } + + // Keep ejecting the node. Ejection time should not increase. + for (auto i = 1; i < 10; i++) { + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Move the time 290s ahead. The unejection should not happen. + for (auto j = 1; j <= 29; j++) { + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + } + + // Node should be brought back after 300 secs. + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back.
+ EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + } +} + +// Test that maximum ejection time logic behaves properly when +// max_ejection_time is not a multiple of base_ejection_time. +// The same test as MaxEjectTime, but with config where +// max_ejection_time is not a multiple of base_ejection_time. +// Because ejection time increases in base_ejection_time intervals, +// the maximum ejection time will be equal to +// max_ejection_time + base_ejection_time. +TEST_F(OutlierDetectorImplTest, MaxEjectTimeNotAlligned) { + // Setup interval time to 10 secs. + ON_CALL(runtime_.snapshot_, getInteger(IntervalMsRuntime, _)).WillByDefault(Return(10000UL)); + ON_CALL(runtime_.snapshot_, getInteger(BaseEjectionTimeMsRuntime, _)) + .WillByDefault(Return(10000UL)); + ON_CALL(runtime_.snapshot_, getInteger(MaxEjectionTimeMsRuntime, _)) + .WillByDefault(Return(305000UL)); + EXPECT_CALL(cluster_.prioritySet(), addMemberUpdateCb(_)); + addHosts({"tcp://127.0.0.1:80"}); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + std::shared_ptr detector(DetectorImpl::create( + cluster_, empty_outlier_detection_, dispatcher_, runtime_, time_system_, event_logger_)); + detector->addChangedStateCb([&](HostSharedPtr host) -> void { checker_.check(host); }); + + // Verify that maximum_ejection_time caps ejection time. + // Base ejection time is 10s. Max ejection time is 305s. + uint32_t eject_tick = 0; + time_system_.setMonotonicTime(std::chrono::seconds(0)); + // Trigger 31 ejections in a row. + // For each ejection, time to uneject increases. + for (auto i = 1; i <= 31; i++) { + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected.
+ EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Simulate several intervals. For each check the node should stay ejected. + for (auto j = 1; j < i; j++) { + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + } + + // Wait for unejection. + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + } + + // Keep ejecting the node. Ejection time should not increase. + for (auto i = 1; i < 10; i++) { + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, logEject(std::static_pointer_cast(hosts_[0]), + _, envoy::data::cluster::v2alpha::CONSECUTIVE_5XX, true)); + loadRq(hosts_[0], 5, 500); + // Make sure that node has been ejected. + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + + // Move the time 300s ahead. The node should stay ejected.
+ for (auto j = 1; j <= 30; j++) { + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + interval_timer_->invokeCallback(); + EXPECT_TRUE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(1UL, outlier_detection_ejections_active_.value()); + } + + // Move time one base_ejection_time beyond max_ejection_time. + // Wait for unejection. + time_system_.setMonotonicTime(std::chrono::seconds(++eject_tick * 10)); + EXPECT_CALL(*interval_timer_, enableTimer(std::chrono::milliseconds(10000), _)); + EXPECT_CALL(checker_, check(hosts_[0])); + EXPECT_CALL(*event_logger_, + logUneject(std::static_pointer_cast(hosts_[0]))); + interval_timer_->invokeCallback(); + // Make sure that node has been brought back. + EXPECT_FALSE(hosts_[0]->healthFlagGet(Host::HealthFlag::FAILED_OUTLIER_CHECK)); + EXPECT_EQ(0UL, outlier_detection_ejections_active_.value()); + } +} + TEST(DetectorHostMonitorNullImplTest, All) { DetectorHostMonitorNullImpl null_sink; diff --git a/test/common/upstream/random_load_balancer_corpus/random_256_ports b/test/common/upstream/random_load_balancer_corpus/random_256_ports index e2792414cfe8..d2bfe9a75b36 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_256_ports +++ b/test/common/upstream/random_load_balancer_corpus/random_256_ports @@ -10,12 +10,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_NoHosts b/test/common/upstream/random_load_balancer_corpus/random_NoHosts index 63b10ab1aa3d..bae1ee957bfe 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_NoHosts +++ b/test/common/upstream/random_load_balancer_corpus/random_NoHosts @@ -3,7 +3,7 @@ common_lb_config { } actions { - prefetch { + preconnect { } } diff --git 
a/test/common/upstream/random_load_balancer_corpus/random_Normal b/test/common/upstream/random_load_balancer_corpus/random_Normal index 66bff38e17c6..53b6a095d812 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_Normal +++ b/test/common/upstream/random_load_balancer_corpus/random_Normal @@ -13,12 +13,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_crash-55abbf82c64b5a62e299b93d7b254045471199c9 b/test/common/upstream/random_load_balancer_corpus/random_crash-55abbf82c64b5a62e299b93d7b254045471199c9 index ca01834b3116..d5c2d10abd6a 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_crash-55abbf82c64b5a62e299b93d7b254045471199c9 +++ b/test/common/upstream/random_load_balancer_corpus/random_crash-55abbf82c64b5a62e299b93d7b254045471199c9 @@ -10,12 +10,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb b/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb index 65c8062d59c0..833d901555f5 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb +++ b/test/common/upstream/random_load_balancer_corpus/random_crash-ba5efdfd9c412a8507087120783fe6529b1ac0cb @@ -11,11 +11,11 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } actions { diff --git a/test/common/upstream/random_load_balancer_corpus/random_largest-port-value b/test/common/upstream/random_load_balancer_corpus/random_largest-port-value index a89ecba1b8de..7914b196b7bf 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_largest-port-value +++ b/test/common/upstream/random_load_balancer_corpus/random_largest-port-value @@ 
-7,11 +7,11 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } actions { diff --git a/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts b/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts index 53a69c852f58..703db4cc5e83 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts +++ b/test/common/upstream/random_load_balancer_corpus/random_many_choose_hosts @@ -10,12 +10,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_max_ports b/test/common/upstream/random_load_balancer_corpus/random_max_ports index 73e56d8d7637..13c3e188f5fd 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_max_ports +++ b/test/common/upstream/random_load_balancer_corpus/random_max_ports @@ -13,12 +13,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports b/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports index 4ec8cd27d2ac..b3fd1317b6c2 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports +++ b/test/common/upstream/random_load_balancer_corpus/random_overflowing_ports @@ -13,12 +13,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 index b8f81a4d451c..5a13e5314e5f 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 +++ 
b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-eed4596101efb3e737f736c8d5bcd4f0815a8728 @@ -14,11 +14,11 @@ load_balancer_test_case { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } actions { diff --git a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test index 1abe2824c65b..f45e356e55bd 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test +++ b/test/common/upstream/random_load_balancer_corpus/random_slow-unit-test @@ -14,11 +14,11 @@ load_balancer_test_case { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } actions { diff --git a/test/common/upstream/random_load_balancer_corpus/random_test_something b/test/common/upstream/random_load_balancer_corpus/random_test_something index 7025e0fed767..a183ec60ac4a 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_test_something +++ b/test/common/upstream/random_load_balancer_corpus/random_test_something @@ -13,12 +13,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_with-locality b/test/common/upstream/random_load_balancer_corpus/random_with-locality index 15adcc4de667..0a5507eddb09 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_with-locality +++ b/test/common/upstream/random_load_balancer_corpus/random_with-locality @@ -15,12 +15,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_with-locality-high-number-of-hosts b/test/common/upstream/random_load_balancer_corpus/random_with-locality-high-number-of-hosts index 2a96d688b5d6..76a5807a157f 100644 --- 
a/test/common/upstream/random_load_balancer_corpus/random_with-locality-high-number-of-hosts +++ b/test/common/upstream/random_load_balancer_corpus/random_with-locality-high-number-of-hosts @@ -15,12 +15,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/random_load_balancer_corpus/random_with_locality-50000-hosts b/test/common/upstream/random_load_balancer_corpus/random_with_locality-50000-hosts index 7a1784375405..7f0604103ed4 100644 --- a/test/common/upstream/random_load_balancer_corpus/random_with_locality-50000-hosts +++ b/test/common/upstream/random_load_balancer_corpus/random_with_locality-50000-hosts @@ -15,12 +15,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/round_robin_load_balancer_corpus/round_robin-high-number-of-hosts b/test/common/upstream/round_robin_load_balancer_corpus/round_robin-high-number-of-hosts index c0c794caa55f..7b673ee9511a 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin-high-number-of-hosts +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin-high-number-of-hosts @@ -16,12 +16,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/round_robin_load_balancer_corpus/round_robin-with-locality-high-number-of-hosts b/test/common/upstream/round_robin_load_balancer_corpus/round_robin-with-locality-high-number-of-hosts index c3e6091cf7ff..a4817438717b 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin-with-locality-high-number-of-hosts +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin-with-locality-high-number-of-hosts @@ -16,12 +16,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git 
a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority index f3502762e74a..3c40bd2ca592 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority @@ -12,12 +12,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority_update_hosts b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority_update_hosts index 99fe34c09ed0..568a03dd7374 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority_update_hosts +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_local_priority_update_hosts @@ -12,12 +12,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_no_hosts b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_no_hosts index 41e64fbf19b1..9ae1ed058c36 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_no_hosts +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_no_hosts @@ -4,7 +4,7 @@ common_lb_config { } actions { - prefetch { + preconnect { } } diff --git a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_normal b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_normal index 6086e00c5d87..dd4b921149a6 100644 --- a/test/common/upstream/round_robin_load_balancer_corpus/round_robin_normal +++ b/test/common/upstream/round_robin_load_balancer_corpus/round_robin_normal @@ -11,12 +11,12 @@ actions { } } actions { - prefetch { + preconnect { } } actions { - prefetch { + preconnect { } } diff --git 
a/test/config/BUILD b/test/config/BUILD index 1369a7b3afce..1f9d463b5b03 100644 --- a/test/config/BUILD +++ b/test/config/BUILD @@ -25,6 +25,7 @@ envoy_cc_test_library( "//test/integration:server_stats_interface", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", + "//test/test_common:printers_lib", "//test/test_common:resources_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", diff --git a/test/dependencies/curl_test.cc b/test/dependencies/curl_test.cc index 36bf8e149ed2..e046db65a95d 100644 --- a/test/dependencies/curl_test.cc +++ b/test/dependencies/curl_test.cc @@ -40,7 +40,7 @@ TEST(CurlTest, BuiltWithExpectedFeatures) { EXPECT_EQ(0, info->features & CURL_VERSION_HTTPS_PROXY); EXPECT_EQ(0, info->features & CURL_VERSION_MULTI_SSL); EXPECT_EQ(0, info->features & CURL_VERSION_BROTLI); - EXPECT_EQ(0, info->features & CURL_VERSION_ALTSVC); + EXPECT_NE(0, info->features & CURL_VERSION_ALTSVC); EXPECT_EQ(0, info->features & CURL_VERSION_HTTP3); EXPECT_NE(0, info->ares_num); } diff --git a/test/extensions/bootstrap/wasm/wasm_speed_test.cc b/test/extensions/bootstrap/wasm/wasm_speed_test.cc index 6d39d399fb89..9dbfd82911eb 100644 --- a/test/extensions/bootstrap/wasm/wasm_speed_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_speed_test.cc @@ -37,7 +37,7 @@ class TestRoot : public Envoy::Extensions::Common::Wasm::Context { log_(static_cast(level), message); return proxy_wasm::WasmResult::Ok; } - MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); + MOCK_METHOD(void, log_, (spdlog::level::level_enum level, absl::string_view message)); }; static void bmWasmSimpleCallSpeedTest(benchmark::State& state, std::string test, diff --git a/test/extensions/bootstrap/wasm/wasm_test.cc b/test/extensions/bootstrap/wasm/wasm_test.cc index 757b086770b6..2eaf5083095a 100644 --- a/test/extensions/bootstrap/wasm/wasm_test.cc +++ b/test/extensions/bootstrap/wasm/wasm_test.cc @@ -32,7 
+32,7 @@ class TestContext : public Extensions::Common::Wasm::Context { log_(static_cast(level), message); return proxy_wasm::WasmResult::Ok; } - MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); + MOCK_METHOD(void, log_, (spdlog::level::level_enum level, absl::string_view message)); }; class WasmTestBase { diff --git a/test/extensions/common/wasm/wasm_test.cc b/test/extensions/common/wasm/wasm_test.cc index 19e5b016ae72..56266c0fb64d 100644 --- a/test/extensions/common/wasm/wasm_test.cc +++ b/test/extensions/common/wasm/wasm_test.cc @@ -79,7 +79,7 @@ class TestContext : public ::Envoy::Extensions::Common::Wasm::Context { Extensions::Common::Wasm::Context::log(static_cast(level), message); return proxy_wasm::WasmResult::Ok; } - MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); + MOCK_METHOD(void, log_, (spdlog::level::level_enum level, absl::string_view message)); }; class WasmCommonTest : public testing::TestWithParam { diff --git a/test/extensions/common/wasm/wasm_vm_test.cc b/test/extensions/common/wasm/wasm_vm_test.cc index 728d15b07709..43f6f0ac927c 100644 --- a/test/extensions/common/wasm/wasm_vm_test.cc +++ b/test/extensions/common/wasm/wasm_vm_test.cc @@ -61,10 +61,9 @@ TEST_F(BaseVmTest, NullVmStartup) { auto wasm_vm_clone = wasm_vm->clone(); EXPECT_TRUE(wasm_vm_clone != nullptr); EXPECT_TRUE(wasm_vm->getCustomSection("user").empty()); - EXPECT_EQ(getEnvoyWasmIntegration(*wasm_vm).runtime(), "envoy.wasm.runtime.null"); + EXPECT_EQ(wasm_vm->runtime(), "null"); std::function f; - EXPECT_FALSE( - getEnvoyWasmIntegration(*wasm_vm).getNullVmFunction("bad_function", false, 0, nullptr, &f)); + EXPECT_FALSE(wasm_vm->integration()->getNullVmFunction("bad_function", false, 0, nullptr, &f)); } TEST_F(BaseVmTest, NullVmMemory) { diff --git a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc index 
d37bd313b090..63f7fb159387 100644 --- a/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc +++ b/test/extensions/filters/http/grpc_http1_bridge/http1_bridge_filter_test.cc @@ -33,7 +33,7 @@ class GrpcHttp1BridgeFilterTest : public testing::Test { ~GrpcHttp1BridgeFilterTest() override { filter_.onDestroy(); } - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Grpc::ContextImpl context_; Http1BridgeFilter filter_; NiceMock decoder_callbacks_; diff --git a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc index b864c8574bab..622128615778 100644 --- a/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc +++ b/test/extensions/filters/http/grpc_web/grpc_web_filter_test.cc @@ -103,7 +103,7 @@ class GrpcWebFilterTest : public testing::TestWithParam decoder_callbacks_; diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index fae1d1676efb..dd5fafd64670 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -174,7 +174,7 @@ TEST_F(OAuth2Test, RequestSignout) { }; EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); } @@ -255,7 +255,7 @@ TEST_F(OAuth2Test, OAuthErrorNonOAuthHttpCallback) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); } @@ -281,7 +281,7 @@ TEST_F(OAuth2Test, OAuthErrorQueryString) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&response_headers), 
false)); EXPECT_CALL(decoder_callbacks_, encodeData(_, true)); - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); EXPECT_EQ(scope_.counterFromString("test.oauth_failure").value(), 1); @@ -356,6 +356,7 @@ TEST_F(OAuth2Test, CookieValidator) { }; auto cookie_validator = std::make_shared(test_time_); + EXPECT_EQ(cookie_validator->token(), ""); cookie_validator->setParams(request_headers, "mock-secret"); EXPECT_TRUE(cookie_validator->hmacIsValid()); @@ -421,7 +422,7 @@ TEST_F(OAuth2Test, OAuthTestInvalidUrlInStateQueryParam) { EXPECT_CALL(*validator_, token()).WillRepeatedly(ReturnRef(legit_token)); EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_headers), false)); - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); } @@ -455,7 +456,7 @@ TEST_F(OAuth2Test, OAuthTestCallbackUrlInStateQueryParam) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&expected_response_headers), false)); - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers, false)); Http::TestRequestHeaderMapImpl final_request_headers{ @@ -564,7 +565,7 @@ TEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&first_response_headers), true)); // This represents the beginning of the OAuth filter. - EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndBuffer, + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, filter_->decodeHeaders(first_request_headers, false)); // This represents the callback request from the authorization server. 
@@ -588,6 +589,7 @@ TEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) { filter_->decodeHeaders(second_request_headers, false)); EXPECT_EQ(1, config_->stats().oauth_unauthorized_rq_.value()); + EXPECT_EQ(config_->clusterName(), "auth.example.com"); // Expected response after the callback & validation is complete - verifying we kept the // state and method of the original request, including the query string parameters. diff --git a/test/extensions/filters/network/common/redis/BUILD b/test/extensions/filters/network/common/redis/BUILD index 08509ae643f1..5bb47f0c7e49 100644 --- a/test/extensions/filters/network/common/redis/BUILD +++ b/test/extensions/filters/network/common/redis/BUILD @@ -18,6 +18,7 @@ envoy_cc_mock( "//source/common/common:assert_lib", "//source/extensions/filters/network/common/redis:client_lib", "//source/extensions/filters/network/common/redis:codec_lib", + "//test/test_common:printers_lib", ], ) diff --git a/test/extensions/filters/network/redis_proxy/BUILD b/test/extensions/filters/network/redis_proxy/BUILD index c69e8106aca3..79e3d11e1969 100644 --- a/test/extensions/filters/network/redis_proxy/BUILD +++ b/test/extensions/filters/network/redis_proxy/BUILD @@ -99,6 +99,7 @@ envoy_cc_mock( "//source/extensions/filters/network/redis_proxy:command_splitter_interface", "//source/extensions/filters/network/redis_proxy:conn_pool_interface", "//source/extensions/filters/network/redis_proxy:router_interface", + "//test/test_common:printers_lib", ], ) diff --git a/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc index db9f4108aedd..adddcd737dc7 100644 --- a/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc +++ b/test/extensions/stats_sinks/wasm/wasm_stat_sink_test.cc @@ -28,7 +28,7 @@ class TestContext : public ::Envoy::Extensions::Common::Wasm::Context { Extensions::Common::Wasm::Context::log(static_cast(level), message); return proxy_wasm::WasmResult::Ok; } - 
MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)); + MOCK_METHOD(void, log_, (spdlog::level::level_enum level, absl::string_view message)); }; class WasmCommonContextTest diff --git a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc index 011030dff5a4..1c7a49fe4255 100644 --- a/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc +++ b/test/extensions/tracers/common/ot/opentracing_driver_impl_test.cc @@ -215,6 +215,18 @@ TEST_F(OpenTracingDriverTest, ExtractWithUnindexedHeader) { EXPECT_EQ(spans.at(1).span_context.span_id, spans.at(0).references.at(0).span_id); } +TEST_F(OpenTracingDriverTest, GetTraceId) { + setupValidDriver(); + + Tracing::SpanPtr first_span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + first_span->setTag("abc", "123"); + first_span->finishSpan(); + + // This method is unimplemented and a noop. 
+ ASSERT_EQ(first_span->getTraceIdAsHex(), ""); +} + } // namespace } // namespace Ot } // namespace Common diff --git a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc index 6513314c0ba0..3d49ba486de6 100644 --- a/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc +++ b/test/extensions/tracers/lightstep/lightstep_tracer_impl_test.cc @@ -119,7 +119,7 @@ class LightStepDriverTest : public testing::Test { SystemTime start_time_; StreamInfo::MockStreamInfo stream_info_; - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Grpc::ContextImpl grpc_context_; NiceMock tls_; NiceMock stats_; @@ -723,6 +723,15 @@ TEST_F(LightStepDriverTest, GetAndSetBaggage) { EXPECT_EQ(span->getBaggage(key), value); } +TEST_F(LightStepDriverTest, GetTraceId) { + setupValidDriver(); + Tracing::SpanPtr span = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + + // This method is unimplemented and a noop. + ASSERT_EQ(span->getTraceIdAsHex(), ""); +} + } // namespace } // namespace Lightstep } // namespace Tracers diff --git a/test/extensions/tracers/opencensus/tracer_test.cc b/test/extensions/tracers/opencensus/tracer_test.cc index 3a5e09fbf58d..67cba33bfb85 100644 --- a/test/extensions/tracers/opencensus/tracer_test.cc +++ b/test/extensions/tracers/opencensus/tracer_test.cc @@ -127,6 +127,9 @@ TEST(OpenCensusTracerTest, Span) { // Baggage methods are a noop in opencensus and won't affect events. span->setBaggage("baggage_key", "baggage_value"); ASSERT_EQ("", span->getBaggage("baggage_key")); + + // Trace id is automatically created when no parent context exists. + ASSERT_NE(span->getTraceIdAsHex(), ""); } // Retrieve SpanData from the OpenCensus trace exporter. 
@@ -217,6 +220,10 @@ void testIncomingHeaders( {Tracing::Reason::Sampling, false}); span->injectContext(injected_headers); span->finishSpan(); + + // Check contents via public API. + // Trace id is set via context propagation headers. + EXPECT_EQ(span->getTraceIdAsHex(), "404142434445464748494a4b4c4d4e4f"); } // Retrieve SpanData from the OpenCensus trace exporter. @@ -225,7 +232,7 @@ void testIncomingHeaders( const auto& sd = spans[0]; ENVOY_LOG_MISC(debug, "{}", sd.DebugString()); - // Check contents. + // Check contents by inspecting private span data. EXPECT_TRUE(sd.has_remote_parent()); EXPECT_EQ("6162636465666768", sd.parent_span_id().ToHex()); EXPECT_EQ("404142434445464748494a4b4c4d4e4f", sd.context().trace_id().ToHex()); diff --git a/test/extensions/tracers/skywalking/tracer_test.cc b/test/extensions/tracers/skywalking/tracer_test.cc index 843421740d72..b6c169487b35 100644 --- a/test/extensions/tracers/skywalking/tracer_test.cc +++ b/test/extensions/tracers/skywalking/tracer_test.cc @@ -97,6 +97,9 @@ TEST_F(TracerTest, TracerTestCreateNewSpanWithNoPropagationHeaders) { EXPECT_EQ("", span->getBaggage("FakeStringAndNothingToDo")); span->setBaggage("FakeStringAndNothingToDo", "FakeStringAndNothingToDo"); + // This method is unimplemented and a noop. + ASSERT_EQ(span->getTraceIdAsHex(), ""); + // Test whether the basic functions of Span are normal. 
span->setSampled(false); diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 526cab8e9511..3ff68ddbdddc 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -120,6 +120,16 @@ TEST_F(XRayTracerTest, BaggageNotImplemented) { ASSERT_EQ("", span->getBaggage("baggage_key")); } +TEST_F(XRayTracerTest, GetTraceId) { + Tracer tracer{"" /*span name*/, "" /*origin*/, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.createNonSampledSpan(); + span->finishSpan(); + + // This method is unimplemented and a noop. + ASSERT_EQ(span->getTraceIdAsHex(), ""); +} + TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { NiceMock config; constexpr auto expected_span_name = "Service 1"; diff --git a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc index 234e97cbc396..07fe8035ab3e 100644 --- a/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc +++ b/test/extensions/tracers/zipkin/zipkin_tracer_impl_test.cc @@ -709,6 +709,13 @@ TEST_F(ZipkinDriverTest, ZipkinSpanTest) { start_time_, {Tracing::Reason::Sampling, true}); span5->setBaggage("baggage_key", "baggage_value"); EXPECT_EQ("", span5->getBaggage("baggage_key")); + + // ==== + // Test trace id noop + // ==== + Tracing::SpanPtr span6 = driver_->startSpan(config_, request_headers_, operation_name_, + start_time_, {Tracing::Reason::Sampling, true}); + EXPECT_EQ(span6->getTraceIdAsHex(), ""); } TEST_F(ZipkinDriverTest, ZipkinSpanContextFromB3HeadersTest) { diff --git a/test/extensions/transport_sockets/starttls/BUILD b/test/extensions/transport_sockets/starttls/BUILD index 0b38b0b7aa56..a54e9b757c20 100644 --- a/test/extensions/transport_sockets/starttls/BUILD +++ b/test/extensions/transport_sockets/starttls/BUILD @@ -40,8 +40,6 @@ envoy_extension_cc_test( "//test/config/integration/certs", ], 
extension_name = "envoy.transport_sockets.starttls", - # TODO(envoyproxy/windows-dev): Investigate timeout - tags = ["flaky_on_windows"], deps = [ ":starttls_integration_proto_cc_proto", "//source/extensions/filters/network/tcp_proxy:config", diff --git a/test/extensions/transport_sockets/starttls/starttls_integration_test.cc b/test/extensions/transport_sockets/starttls/starttls_integration_test.cc index b88d4b4d8b72..54394a8881ec 100644 --- a/test/extensions/transport_sockets/starttls/starttls_integration_test.cc +++ b/test/extensions/transport_sockets/starttls/starttls_integration_test.cc @@ -272,15 +272,13 @@ TEST_P(StartTlsIntegrationTest, SwitchToTlsFromClient) { // Send a message to switch to tls on the receiver side. // StartTlsSwitchFilter will switch transport socket on the - // receiver side upon receiving "switch" message. + // receiver side upon receiving "switch" message and send + // back the message "usetls". + payload_reader_->set_data_to_wait_for("usetls"); buffer.add("switch"); conn_->write(buffer, false); - while (client_write_buffer_->bytesDrained() != 11) { - dispatcher_->run(Event::Dispatcher::RunType::NonBlock); - } // Wait for confirmation - payload_reader_->set_data_to_wait_for("usetls"); dispatcher_->run(Event::Dispatcher::RunType::Block); // Without closing the connection, switch to tls. 
diff --git a/test/integration/BUILD b/test/integration/BUILD index 79b8ed1e71b1..1cf483ad6f89 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -443,11 +443,13 @@ envoy_cc_test( "//source/extensions/filters/http/buffer:config", "//source/extensions/filters/http/health_check:config", "//test/common/http/http2:http2_frame", + "//test/integration/filters:continue_after_local_reply_filter_lib", "//test/integration/filters:continue_headers_only_inject_body", "//test/integration/filters:encoder_decoder_buffer_filter_lib", "//test/integration/filters:invalid_header_filter_lib", "//test/integration/filters:local_reply_during_encoding_filter_lib", "//test/integration/filters:random_pause_filter_lib", + "//test/test_common:logging_lib", "//test/test_common:utility_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/route/v3:pkg_cc_proto", diff --git a/test/integration/README.md b/test/integration/README.md index b470cb061cc6..d16031807a9e 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -18,22 +18,22 @@ initialize(); codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); // Create some request headers. -Http::TestHeaderMapImpl request_headers{{":method", "GET"}, - {":path", "/test/long/url"}, - {":scheme", "http"}, - {":authority", "host"}}; +Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}}; // Send the request headers from the client, wait until they are received upstream. When they // are received, send the default response headers from upstream and wait until they are // received at by client -sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); +auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); // Verify the proxied request was received upstream, as expected. 
EXPECT_TRUE(upstream_request_->complete()); EXPECT_EQ(0U, upstream_request_->bodyLength()); // Verify the proxied response was received downstream, as expected. -EXPECT_TRUE(response_->complete()); -EXPECT_STREQ("200", response_->headers().Status()->value().c_str()); +EXPECT_TRUE(response->complete()); +EXPECT_STREQ("200", response->headers().Status()->value().c_str()); EXPECT_EQ(0U, response_->body().size()); ``` diff --git a/test/integration/ads_integration_test.cc b/test/integration/ads_integration_test.cc index afb5a1822b57..0a0a61a56034 100644 --- a/test/integration/ads_integration_test.cc +++ b/test/integration/ads_integration_test.cc @@ -307,6 +307,32 @@ TEST_P(AdsIntegrationTest, Failure) { makeSingleRequest(); } +// Regression test for https://github.com/envoyproxy/envoy/issues/9682. +TEST_P(AdsIntegrationTest, ResendNodeOnStreamReset) { + initialize(); + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "", {}, {}, {}, true)); + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {buildCluster("cluster_0")}, + {buildCluster("cluster_0")}, {}, "1"); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().ClusterLoadAssignment, "", + {"cluster_0"}, {"cluster_0"}, {})); + sendDiscoveryResponse( + Config::TypeUrl::get().ClusterLoadAssignment, {buildClusterLoadAssignment("cluster_0")}, + {buildClusterLoadAssignment("cluster_0")}, {}, "1"); + + // A second CDS request should be sent so that the node is cleared in the cached request. + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {}, {}, {})); + + xds_stream_->finishGrpcStream(Grpc::Status::Internal); + AssertionResult result = xds_connection_->waitForNewStream(*dispatcher_, xds_stream_); + RELEASE_ASSERT(result, result.message()); + xds_stream_->startGrpcStream(); + + EXPECT_TRUE(compareDiscoveryRequest(Config::TypeUrl::get().Cluster, "1", {"cluster_0"}, + {"cluster_0"}, {}, true)); +} + // Validate that xds can support a mix of v2 and v3 type url. 
TEST_P(AdsIntegrationTest, MixV2V3TypeUrlInDiscoveryResponse) { config_helper_.addRuntimeOverride( diff --git a/test/integration/autonomous_upstream.cc b/test/integration/autonomous_upstream.cc index e2185bbbf2a6..16d49cb50e60 100644 --- a/test/integration/autonomous_upstream.cc +++ b/test/integration/autonomous_upstream.cc @@ -21,6 +21,7 @@ const char AutonomousStream::RESPONSE_SIZE_BYTES[] = "response_size_bytes"; const char AutonomousStream::RESPONSE_DATA_BLOCKS[] = "response_data_blocks"; const char AutonomousStream::EXPECT_REQUEST_SIZE_BYTES[] = "expect_request_size_bytes"; const char AutonomousStream::RESET_AFTER_REQUEST[] = "reset_after_request"; +const char AutonomousStream::CLOSE_AFTER_RESPONSE[] = "close_after_response"; const char AutonomousStream::NO_TRAILERS[] = "no_trailers"; const char AutonomousStream::NO_END_STREAM[] = "no_end_stream"; @@ -84,6 +85,11 @@ void AutonomousStream::sendResponse() { encodeTrailers(upstream_.responseTrailers()); } } + if (!headers.get_(CLOSE_AFTER_RESPONSE).empty()) { + parent_.connection().dispatcher().post( + [this]() -> void { parent_.connection().close(Network::ConnectionCloseType::FlushWrite); }); + return; + } } AutonomousHttpConnection::AutonomousHttpConnection(AutonomousUpstream& autonomous_upstream, diff --git a/test/integration/autonomous_upstream.h b/test/integration/autonomous_upstream.h index 3a5acd2443a5..d5ae283fec56 100644 --- a/test/integration/autonomous_upstream.h +++ b/test/integration/autonomous_upstream.h @@ -25,6 +25,8 @@ class AutonomousStream : public FakeStream { static const char NO_TRAILERS[]; // Prevents upstream from finishing response. static const char NO_END_STREAM[]; + // Closes the underlying connection after a given response is sent. 
+ static const char CLOSE_AFTER_RESPONSE[]; AutonomousStream(FakeHttpConnection& parent, Http::ResponseEncoder& encoder, AutonomousUpstream& upstream, bool allow_incomplete_streams); diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index f0d2e5874b89..fcab653b157f 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -222,9 +222,9 @@ class FakeStream : public Http::RequestDecoder, absl::Mutex lock_; Http::RequestHeaderMapPtr headers_ ABSL_GUARDED_BY(lock_); Buffer::OwnedImpl body_ ABSL_GUARDED_BY(lock_); + FakeHttpConnection& parent_; private: - FakeHttpConnection& parent_; Http::ResponseEncoder& encoder_; Http::RequestTrailerMapPtr trailers_ ABSL_GUARDED_BY(lock_); bool end_stream_ ABSL_GUARDED_BY(lock_){}; diff --git a/test/integration/filters/BUILD b/test/integration/filters/BUILD index 8e25cc2c1622..1d78984323f3 100644 --- a/test/integration/filters/BUILD +++ b/test/integration/filters/BUILD @@ -39,6 +39,21 @@ envoy_cc_test_library( ], ) +envoy_cc_test_library( + name = "continue_after_local_reply_filter_lib", + srcs = [ + "continue_after_local_reply_filter.cc", + ], + deps = [ + ":common_lib", + "//include/envoy/http:filter_interface", + "//include/envoy/registry", + "//include/envoy/server:filter_config_interface", + "//source/extensions/filters/http/common:pass_through_filter_lib", + "//test/extensions/filters/http/common:empty_http_filter_config_lib", + ], +) + envoy_cc_test_library( name = "continue_headers_only_inject_body", srcs = [ diff --git a/test/integration/filters/continue_after_local_reply_filter.cc b/test/integration/filters/continue_after_local_reply_filter.cc new file mode 100644 index 000000000000..be79f0c84ccf --- /dev/null +++ b/test/integration/filters/continue_after_local_reply_filter.cc @@ -0,0 +1,30 @@ +#include "envoy/registry/registry.h" +#include "envoy/server/filter_config.h" + +#include "extensions/filters/http/common/pass_through_filter.h" + +#include 
"test/extensions/filters/http/common/empty_http_filter_config.h" +#include "test/integration/filters/common.h" + +#include "gtest/gtest.h" + +namespace Envoy { + +// A filter that only calls Http::FilterHeadersStatus::Continue after a local reply. +class ContinueAfterLocalReplyFilter : public Http::PassThroughFilter { +public: + constexpr static char name[] = "continue-after-local-reply-filter"; + + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap&, bool) override { + decoder_callbacks_->sendLocalReply(Envoy::Http::Code::OK, "", nullptr, absl::nullopt, + "ContinueAfterLocalReplyFilter is ready"); + return Http::FilterHeadersStatus::Continue; + } +}; + +constexpr char ContinueAfterLocalReplyFilter::name[]; +static Registry::RegisterFactory, + Server::Configuration::NamedHttpFilterConfigFactory> + register_; + +} // namespace Envoy diff --git a/test/integration/integration_tcp_client.cc b/test/integration/integration_tcp_client.cc index 500d26d42aec..aabf2a957d95 100644 --- a/test/integration/integration_tcp_client.cc +++ b/test/integration/integration_tcp_client.cc @@ -48,7 +48,12 @@ IntegrationTcpClient::IntegrationTcpClient( client_write_buffer_ = new NiceMock(below_low, above_high, above_overflow); return client_write_buffer_; + })) + .WillRepeatedly(Invoke([](std::function below_low, std::function above_high, + std::function above_overflow) -> Buffer::Instance* { + return new Buffer::WatermarkBuffer(below_low, above_high, above_overflow); })); + ; connection_ = dispatcher.createClientConnection( Network::Utility::resolveUrl( diff --git a/test/integration/local_reply_integration_test.cc b/test/integration/local_reply_integration_test.cc index 74da5ae0c25a..8bf22764eb10 100644 --- a/test/integration/local_reply_integration_test.cc +++ b/test/integration/local_reply_integration_test.cc @@ -411,4 +411,59 @@ TEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToCustomString) { EXPECT_EQ(response->body(), "513 - customized body text"); } +// Should 
return formatted text/plain response. +TEST_P(LocalReplyIntegrationTest, ShouldFormatResponseToEmptyBody) { + const std::string yaml = R"EOF( +mappers: +- filter: + status_code_filter: + comparison: + op: EQ + value: + default_value: 503 + runtime_key: key_b + status_code: 513 + body: + inline_string: "" +body_format: + text_format_source: + inline_string: "" +)EOF"; + setLocalReplyConfig(yaml); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto encoder_decoder = codec_client_->startRequest( + Http::TestRequestHeaderMapImpl{{":method", "POST"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"test-header", "exact-match-value-2"}}); + auto response = std::move(encoder_decoder.second); + + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForHeadersComplete()); + ASSERT_TRUE(fake_upstream_connection_->close()); + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + response->waitForEndStream(); + + if (downstream_protocol_ == Http::CodecClient::Type::HTTP1) { + ASSERT_TRUE(codec_client_->waitForDisconnect()); + } else { + codec_client_->close(); + } + + EXPECT_FALSE(upstream_request_->complete()); + EXPECT_EQ(0U, upstream_request_->bodyLength()); + + EXPECT_TRUE(response->complete()); + + EXPECT_EQ("513", response->headers().Status()->value().getStringView()); + + EXPECT_EQ(response->body(), ""); +} + } // namespace Envoy diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 68865b4c16e7..fe572d7302de 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -34,6 +34,7 @@ #include "test/mocks/upstream/retry_priority.h" #include "test/mocks/upstream/retry_priority_factory.h" #include 
"test/test_common/environment.h" +#include "test/test_common/logging.h" #include "test/test_common/network_utility.h" #include "test/test_common/registry.h" @@ -287,6 +288,40 @@ TEST_P(ProtocolIntegrationTest, ContinueHeadersOnlyInjectBodyFilter) { EXPECT_EQ(response->body(), "body"); } +// Tests a filter that returns a FilterHeadersStatus::Continue after a local reply. In debug mode, +// this fails on ENVOY_BUG. In opt mode, the status is corrected and the failure is logged. +TEST_P(ProtocolIntegrationTest, ContinueAfterLocalReply) { + config_helper_.addFilter(R"EOF( + name: continue-after-local-reply-filter + typed_config: + "@type": type.googleapis.com/google.protobuf.Empty + )EOF"); + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Send a headers only request. + IntegrationStreamDecoderPtr response; + const std::string error = "envoy bug failure: !state_.local_complete_ || status == " + "FilterHeadersStatus::StopIteration. Details: Filters should return " + "FilterHeadersStatus::StopIteration after sending a local reply."; +#ifdef NDEBUG + EXPECT_LOG_CONTAINS("error", error, { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + }); + EXPECT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +#else + EXPECT_DEATH( + { + response = codec_client_->makeHeaderOnlyRequest(default_request_headers_); + response->waitForEndStream(); + }, + error); +#endif +} + TEST_P(ProtocolIntegrationTest, AddEncodedTrailers) { config_helper_.addFilter(R"EOF( name: add-trailers-filter @@ -1993,10 +2028,10 @@ TEST_P(ProtocolIntegrationTest, ConnDurationTimeoutNoHttpRequest) { test_server_->waitForCounterGe("http.config_test.downstream_cx_max_duration_reached", 1); } -TEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) { +TEST_P(DownstreamProtocolIntegrationTest, TestPreconnect) { config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& 
bootstrap) { auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); - cluster->mutable_prefetch_policy()->mutable_per_upstream_prefetch_ratio()->set_value(1.5); + cluster->mutable_preconnect_policy()->mutable_per_upstream_preconnect_ratio()->set_value(1.5); }); initialize(); codec_client_ = makeHttpConnection(lookupPort("http")); @@ -2004,7 +2039,7 @@ TEST_P(DownstreamProtocolIntegrationTest, TestPrefetch) { sendRequestAndWaitForResponse(default_request_headers_, 0, default_response_headers_, 0); FakeHttpConnectionPtr fake_upstream_connection_two; if (upstreamProtocol() == FakeHttpConnection::Type::HTTP1) { - // For HTTP/1.1 there should be a prefetched connection. + // For HTTP/1.1 there should be a preconnected connection. ASSERT_TRUE( fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_two)); } else { diff --git a/test/integration/server.h b/test/integration/server.h index 9d70d735bbb4..87544c711815 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -415,6 +415,7 @@ class IntegrationTestServer : public Logger::Loggable, on_server_ready_cb_ = std::move(on_server_ready); } void onRuntimeCreated() override {} + void onWorkersStarted() override {} void start(const Network::Address::IpVersion version, std::function on_server_init_function, bool deterministic, diff --git a/test/integration/stats_integration_test.cc b/test/integration/stats_integration_test.cc index 376e416e44fd..f2b7bee41230 100644 --- a/test/integration/stats_integration_test.cc +++ b/test/integration/stats_integration_test.cc @@ -266,7 +266,7 @@ TEST_P(ClusterMemoryTestRunner, MemoryLargeClusterSize) { // 2020/07/31 12035 37114 38000 Init manager store unready targets in hash map. // 2020/08/10 12275 37061 38000 Re-organize tls histogram maps to improve continuity. 
// 2020/08/11 12202 37061 38500 router: add new retry back-off strategy - // 2020/09/11 12973 38993 upstream: predictive prefetch + // 2020/09/11 12973 38993 upstream: predictive preconnect // 2020/10/02 13251 39326 switch to google tcmalloc // Note: when adjusting this value: EXPECT_MEMORY_EQ is active only in CI diff --git a/test/integration/tcp_proxy_integration_test.cc b/test/integration/tcp_proxy_integration_test.cc index 9682480725ba..e63c9d7d9d3f 100644 --- a/test/integration/tcp_proxy_integration_test.cc +++ b/test/integration/tcp_proxy_integration_test.cc @@ -25,6 +25,7 @@ using testing::_; using testing::AtLeast; +using testing::HasSubstr; using testing::Invoke; using testing::MatchesRegex; using testing::NiceMock; @@ -91,6 +92,9 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamWritesFirst) { ASSERT_TRUE(tcp_client->write("", true)); ASSERT_TRUE(fake_upstream_connection->waitForHalfClose()); ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); + // Any time an associated connection is destroyed, it increments both counters. + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 1); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy_with_active_rq", 1); } // Test TLS upstream. @@ -111,6 +115,9 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyUpstreamTls) { tcp_client->close(); EXPECT_EQ("world", tcp_client->data()); + // Any time an associated connection is destroyed, it increments both counters. 
+ test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy", 1); + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_destroy_with_active_rq", 1); } // Test proxying data in both directions, and that all data is flushed properly @@ -150,6 +157,86 @@ TEST_P(TcpProxyIntegrationTest, TcpProxyDownstreamDisconnect) { tcp_client->waitForDisconnect(); } +TEST_P(TcpProxyIntegrationTest, TcpProxyManyConnections) { + autonomous_upstream_ = true; + initialize(); + const int num_connections = 50; + std::vector clients(num_connections); + + for (int i = 0; i < num_connections; ++i) { + clients[i] = makeTcpConnection(lookupPort("tcp_proxy")); + } + test_server_->waitForCounterGe("cluster.cluster_0.upstream_cx_total", num_connections); + for (int i = 0; i < num_connections; ++i) { + IntegrationTcpClientPtr& tcp_client = clients[i]; + // The autonomous upstream is an HTTP upstream, so send raw HTTP. + // This particular request will result in the upstream sending a response, + // and flush-closing due to the 'close_after_response' header. + ASSERT_TRUE(tcp_client->write( + "GET / HTTP/1.1\r\nHost: foo\r\nclose_after_response: yes\r\ncontent-length: 0\r\n\r\n", + false)); + tcp_client->waitForHalfClose(); + tcp_client->close(); + EXPECT_THAT(tcp_client->data(), HasSubstr("aaaaaaaaaa")); + } +} + +TEST_P(TcpProxyIntegrationTest, TcpProxyRandomBehavior) { + autonomous_upstream_ = true; + initialize(); + std::list clients; + + // The autonomous upstream parses HTTP, and HTTP headers and sends responses + // when full requests are received. basic_request will result in + // bidirectional data. request_with_close will result in bidirectional data, + // but also the upstream closing the connection. 
+ const char* basic_request = "GET / HTTP/1.1\r\nHost: foo\r\ncontent-length: 0\r\n\r\n"; + const char* request_with_close = + "GET / HTTP/1.1\r\nHost: foo\r\nclose_after_response: yes\r\ncontent-length: 0\r\n\r\n"; + TestRandomGenerator rand; + + // Seed some initial clients + for (int i = 0; i < 5; ++i) { + clients.push_back(makeTcpConnection(lookupPort("tcp_proxy"))); + } + + // Now randomly write / add more connections / close. + for (int i = 0; i < 50; ++i) { + int action = rand.random() % 3; + + if (action == 0) { + // Add a new connection. + clients.push_back(makeTcpConnection(lookupPort("tcp_proxy"))); + } + if (clients.empty()) { + break; + } + IntegrationTcpClientPtr& tcp_client = clients.front(); + if (action == 1) { + // Write to the first connection. + ASSERT_TRUE(tcp_client->write(basic_request, false)); + tcp_client->waitForData("\r\n\r\n", false); + tcp_client->clearData(tcp_client->data().size()); + } else if (action == 2) { + // Close the first connection. + ASSERT_TRUE(tcp_client->write(request_with_close, false)); + tcp_client->waitForData("\r\n\r\n", false); + tcp_client->waitForHalfClose(); + tcp_client->close(); + clients.pop_front(); + } + } + + while (!clients.empty()) { + IntegrationTcpClientPtr& tcp_client = clients.front(); + ASSERT_TRUE(tcp_client->write(request_with_close, false)); + tcp_client->waitForData("\r\n\r\n", false); + tcp_client->waitForHalfClose(); + tcp_client->close(); + clients.pop_front(); + } +} + TEST_P(TcpProxyIntegrationTest, NoUpstream) { // Set the first upstream to have an invalid port, so connection will fail, // but it won't fail synchronously (as it would if there were simply no diff --git a/test/mocks/buffer/BUILD b/test/mocks/buffer/BUILD index 38d61c302cf4..1d7b6a1dd55d 100644 --- a/test/mocks/buffer/BUILD +++ b/test/mocks/buffer/BUILD @@ -15,6 +15,7 @@ envoy_cc_mock( deps = [ "//source/common/buffer:buffer_lib", "//source/common/buffer:watermark_buffer_lib", + "//test/test_common:printers_lib", 
"//test/test_common:utility_lib", ], ) diff --git a/test/mocks/http/conn_pool.h b/test/mocks/http/conn_pool.h index cdca565449d0..c455696fe329 100644 --- a/test/mocks/http/conn_pool.h +++ b/test/mocks/http/conn_pool.h @@ -24,7 +24,7 @@ class MockInstance : public Instance { MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(bool, hasActiveConnections, (), (const)); MOCK_METHOD(Cancellable*, newStream, (ResponseDecoder & response_decoder, Callbacks& callbacks)); - MOCK_METHOD(bool, maybePrefetch, (float)); + MOCK_METHOD(bool, maybePreconnect, (float)); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); std::shared_ptr> host_; diff --git a/test/mocks/http/mocks.cc b/test/mocks/http/mocks.cc index 639295ae16d3..81f6bcd093ff 100644 --- a/test/mocks/http/mocks.cc +++ b/test/mocks/http/mocks.cc @@ -1,6 +1,7 @@ #include "mocks.h" #include "envoy/buffer/buffer.h" +#include "envoy/common/optref.h" #include "envoy/event/dispatcher.h" #include "envoy/http/header_map.h" @@ -23,10 +24,7 @@ MockServerConnectionCallbacks::~MockServerConnectionCallbacks() = default; MockFilterManagerCallbacks::MockFilterManagerCallbacks() { ON_CALL(*this, responseHeaders()).WillByDefault(Invoke([this]() -> ResponseHeaderMapOptRef { - if (response_headers_) { - return absl::make_optional(std::ref(*response_headers_)); - } - return absl::nullopt; + return makeOptRefFromPtr(response_headers_.get()); })); } MockFilterManagerCallbacks::~MockFilterManagerCallbacks() = default; diff --git a/test/mocks/router/mocks.h b/test/mocks/router/mocks.h index 02be9a0824c1..d4c077683572 100644 --- a/test/mocks/router/mocks.h +++ b/test/mocks/router/mocks.h @@ -253,7 +253,7 @@ class TestVirtualCluster : public VirtualCluster { Stats::StatName statName() const override { return stat_name_.statName(); } VirtualClusterStats& stats() const override { return stats_; } - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::StatNameManagedStorage 
stat_name_{"fake_virtual_cluster", *symbol_table_}; Stats::IsolatedStoreImpl stats_store_; mutable VirtualClusterStats stats_{generateStats(stats_store_)}; @@ -281,7 +281,7 @@ class MockVirtualHost : public VirtualHost { return stat_name_->statName(); } - mutable Stats::TestSymbolTable symbol_table_; + mutable Stats::TestUtil::TestSymbolTable symbol_table_; std::string name_{"fake_vhost"}; mutable std::unique_ptr stat_name_; testing::NiceMock rate_limit_policy_; diff --git a/test/mocks/router/router_filter_interface.h b/test/mocks/router/router_filter_interface.h index 3e8fbec51cac..a37bf61cffce 100644 --- a/test/mocks/router/router_filter_interface.h +++ b/test/mocks/router/router_filter_interface.h @@ -54,7 +54,7 @@ class MockRouterFilterInterface : public RouterFilterInterface { envoy::extensions::filters::http::router::v3::Router router_proto; NiceMock context_; - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::StatNamePool pool_; FilterConfig config_; Upstream::ClusterInfoConstSharedPtr cluster_info_; diff --git a/test/mocks/server/hot_restart.h b/test/mocks/server/hot_restart.h index c6edd13d8905..02d219fa90ab 100644 --- a/test/mocks/server/hot_restart.h +++ b/test/mocks/server/hot_restart.h @@ -29,7 +29,7 @@ class MockHotRestart : public HotRestart { MOCK_METHOD(Stats::Allocator&, statsAllocator, ()); private: - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Thread::MutexBasicLockable log_lock_; Thread::MutexBasicLockable access_log_lock_; Stats::AllocatorImpl stats_allocator_; diff --git a/test/mocks/server/listener_manager.h b/test/mocks/server/listener_manager.h index 582be9ac9bac..c7c855508f6d 100644 --- a/test/mocks/server/listener_manager.h +++ b/test/mocks/server/listener_manager.h @@ -21,7 +21,7 @@ class MockListenerManager : public ListenerManager { (ListenerState state)); MOCK_METHOD(uint64_t, numConnections, (), (const)); MOCK_METHOD(bool, removeListener, 
(const std::string& listener_name)); - MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog)); + MOCK_METHOD(void, startWorkers, (GuardDog & guard_dog, std::function callback)); MOCK_METHOD(void, stopListeners, (StopListenersType listeners_type)); MOCK_METHOD(void, stopWorkers, ()); MOCK_METHOD(void, beginListenerUpdate, ()); diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 1970354d7bc3..98a59e6d558d 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -84,7 +84,7 @@ template class MockMetric : public BaseClass { } } - TestSymbolTable symbol_table_; // Must outlive name_. + TestUtil::TestSymbolTable symbol_table_; // Must outlive name_. MetricName name_; void setTags(const TagVector& tags) { @@ -324,7 +324,7 @@ class MockStore : public TestUtil::TestStore { return textReadout(symbol_table_->toString(name)); } - TestSymbolTable symbol_table_; + TestUtil::TestSymbolTable symbol_table_; testing::NiceMock counter_; std::vector> histograms_; }; diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index c03cb1368192..b052e712353a 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -55,7 +55,7 @@ class MockInstance : public Instance { MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(void, closeConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); - MOCK_METHOD(bool, maybePrefetch, (float), ()); + MOCK_METHOD(bool, maybePreconnect, (float), ()); MOCK_METHOD(Upstream::HostDescriptionConstSharedPtr, host, (), (const)); Envoy::ConnectionPool::MockCancellable* newConnectionImpl(Callbacks& cb); diff --git a/test/mocks/tracing/mocks.h b/test/mocks/tracing/mocks.h index 98a7a96ac513..2d26d4579802 100644 --- a/test/mocks/tracing/mocks.h +++ b/test/mocks/tracing/mocks.h @@ -39,6 +39,7 @@ class MockSpan : public Span { MOCK_METHOD(void, setSampled, (const bool sampled)); MOCK_METHOD(void, setBaggage, (absl::string_view key, absl::string_view value)); 
MOCK_METHOD(std::string, getBaggage, (absl::string_view key)); + MOCK_METHOD(std::string, getTraceIdAsHex, (), (const)); SpanPtr spawnChild(const Config& config, const std::string& name, SystemTime start_time) override { diff --git a/test/mocks/upstream/cluster_info.cc b/test/mocks/upstream/cluster_info.cc index 60436c8398c2..fd46bb958846 100644 --- a/test/mocks/upstream/cluster_info.cc +++ b/test/mocks/upstream/cluster_info.cc @@ -62,7 +62,7 @@ MockClusterInfo::MockClusterInfo() circuit_breakers_stats_, absl::nullopt, absl::nullopt)) { ON_CALL(*this, connectTimeout()).WillByDefault(Return(std::chrono::milliseconds(1))); ON_CALL(*this, idleTimeout()).WillByDefault(Return(absl::optional())); - ON_CALL(*this, perUpstreamPrefetchRatio()).WillByDefault(Return(1.0)); + ON_CALL(*this, perUpstreamPreconnectRatio()).WillByDefault(Return(1.0)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); ON_CALL(*this, edsServiceName()).WillByDefault(ReturnPointee(&eds_service_name_)); ON_CALL(*this, http1Settings()).WillByDefault(ReturnRef(http1_settings_)); diff --git a/test/mocks/upstream/cluster_info.h b/test/mocks/upstream/cluster_info.h index 08949a68e9dd..35b810b515ed 100644 --- a/test/mocks/upstream/cluster_info.h +++ b/test/mocks/upstream/cluster_info.h @@ -93,7 +93,7 @@ class MockClusterInfo : public ClusterInfo { MOCK_METHOD(const absl::optional, grpcTimeoutHeaderMax, (), (const)); MOCK_METHOD(const absl::optional, grpcTimeoutHeaderOffset, (), (const)); - MOCK_METHOD(float, perUpstreamPrefetchRatio, (), (const)); + MOCK_METHOD(float, perUpstreamPreconnectRatio, (), (const)); MOCK_METHOD(float, peekaheadRatio, (), (const)); MOCK_METHOD(uint32_t, perConnectionBufferLimitBytes, (), (const)); MOCK_METHOD(uint64_t, features, (), (const)); diff --git a/test/mocks/upstream/cluster_manager.h b/test/mocks/upstream/cluster_manager.h index bcff9e0fde11..ff5649caf517 100644 --- a/test/mocks/upstream/cluster_manager.h +++ b/test/mocks/upstream/cluster_manager.h @@ -78,7 +78,7 @@ 
class MockClusterManager : public ClusterManager { NiceMock subscription_factory_; absl::flat_hash_map> active_clusters_; absl::flat_hash_map> warming_clusters_; - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; ClusterStatNames cluster_stat_names_; ClusterLoadReportStatNames cluster_load_report_stat_names_; ClusterCircuitBreakersStatNames cluster_circuit_breakers_stat_names_; diff --git a/test/mocks/upstream/host.h b/test/mocks/upstream/host.h index e4e102359b63..80ebfe3099b0 100644 --- a/test/mocks/upstream/host.h +++ b/test/mocks/upstream/host.h @@ -113,7 +113,7 @@ class MockHostDescription : public HostDescription { Network::TransportSocketFactoryPtr socket_factory_; testing::NiceMock cluster_; HostStats stats_; - mutable Stats::TestSymbolTable symbol_table_; + mutable Stats::TestUtil::TestSymbolTable symbol_table_; mutable std::unique_ptr locality_zone_stat_name_; }; @@ -199,7 +199,7 @@ class MockHost : public Host { Network::TransportSocketFactoryPtr socket_factory_; testing::NiceMock outlier_detector_; HostStats stats_; - mutable Stats::TestSymbolTable symbol_table_; + mutable Stats::TestUtil::TestSymbolTable symbol_table_; mutable std::unique_ptr locality_zone_stat_name_; }; diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 3adc03ff3ca9..c661347ca8f7 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -12,72 +12,62 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common/filter:96.3" "source/common/filter/http:96.3" "source/common/http/http3:50.0" -"source/common/init:96.2" +"source/common/init:96.4" "source/common/json:90.6" -"source/common/network:95.1" -"source/common/protobuf:94.3" -"source/common/secret:95.2" -"source/common/signal:83.1" # Death tests don't report LCOV +"source/common/network:95.3" +"source/common/protobuf:94.6" +"source/common/signal:84.5" # Death tests don't report LCOV "source/common/singleton:95.1" "source/common/thread:0.0" # Death tests don't report 
LCOV -"source/common/thread_local:95.7" "source/common/matcher:92.8" "source/common/tracing:94.9" "source/common/watchdog:42.9" # Death tests don't report LCOV -"source/exe:93.7" +"source/exe:93.8" "source/extensions:96.3" "source/extensions/common/crypto:91.5" -"source/extensions/common/tap:95.1" -"source/extensions/common/wasm:85.4" +"source/extensions/common/tap:95.9" +"source/extensions/common/wasm:95.4" "source/extensions/common/wasm/null:77.8" "source/extensions/common/wasm/v8:85.4" "source/extensions/common:94.4" -"source/extensions/filters/common:94.7" -"source/extensions/filters/common/expr:92.2" -"source/extensions/filters/common/fault:94.3" -"source/extensions/filters/common/rbac:87.1" +"source/extensions/filters/common:96.3" +"source/extensions/filters/common/expr:95.8" +"source/extensions/filters/common/fault:94.6" +"source/extensions/filters/common/rbac:87.5" "source/extensions/filters/http/cache:92.4" "source/extensions/filters/http/cache/simple_http_cache:95.2" -"source/extensions/filters/http/dynamic_forward_proxy:94.9" -"source/extensions/filters/http/grpc_json_transcoder:93.3" +"source/extensions/filters/http/dynamic_forward_proxy:95.0" +"source/extensions/filters/http/grpc_json_transcoder:94.8" "source/extensions/filters/http/ip_tagging:91.2" -"source/extensions/filters/http/kill_request:94.4" # Death tests don't report LCOV -"source/extensions/filters/http/oauth2:96.5" -"source/extensions/filters/listener:96.0" -"source/extensions/filters/listener/http_inspector:93.3" +"source/extensions/filters/http/kill_request:95.0" # Death tests don't report LCOV +"source/extensions/filters/listener:96.5" "source/extensions/filters/listener/tls_inspector:92.4" "source/extensions/filters/network/common:96.1" "source/extensions/filters/network/common/redis:96.2" "source/extensions/filters/network/dubbo_proxy:96.1" "source/extensions/filters/network/dubbo_proxy/router:95.1" -"source/extensions/filters/network/http_connection_manager:95.2" 
-"source/extensions/filters/network/mongo_proxy:94.0" +"source/extensions/filters/network/mongo_proxy:94.1" "source/extensions/filters/network/sni_cluster:90.3" "source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" -"source/extensions/filters/udp:91.1" -"source/extensions/filters/udp/dns_filter:96.9" -"source/extensions/grpc_credentials:92.0" "source/extensions/health_checkers:95.9" "source/extensions/health_checkers/redis:95.9" -"source/extensions/quic_listeners:84.8" +"source/extensions/quic_listeners:85.0" "source/extensions/quic_listeners/quiche:84.8" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.0" -"source/extensions/tracers/opencensus:91.2" +"source/extensions/tracers:96.4" +"source/extensions/tracers/opencensus:91.6" "source/extensions/tracers/xray:94.0" "source/extensions/transport_sockets:95.1" -"source/extensions/transport_sockets/tap:95.6" -"source/extensions/transport_sockets/tls/ocsp:95.3" "source/extensions/transport_sockets/tls/private_key:76.9" -"source/extensions/transport_sockets/tls:94.2" +"source/extensions/transport_sockets/tls:94.4" "source/extensions/wasm_runtime:50.0" "source/extensions/wasm_runtime/wasmtime:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wavm:0.0" # Noe enabled in coverage build -"source/extensions/watchdog:69.6" # Death tests within extensions -"source/extensions/watchdog/profile_action:84.9" -"source/server:94.5" +"source/extensions/watchdog:85.7" # Death tests within extensions +"source/extensions/watchdog/profile_action:85.7" +"source/server:94.7" "source/server/admin:95.1" -"source/server/config_validation:75.9" +"source/server/config_validation:76.6" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" diff --git a/test/server/admin/prometheus_stats_test.cc b/test/server/admin/prometheus_stats_test.cc index ee0cae35a0c3..bd7ebd530d98 100644 --- a/test/server/admin/prometheus_stats_test.cc +++ b/test/server/admin/prometheus_stats_test.cc @@ -91,7 +91,7 @@ class 
PrometheusStatsFormatterTest : public testing::Test { EXPECT_EQ(0, symbol_table_->numSymbols()); } - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; Stats::AllocatorImpl alloc_; Stats::StatNamePool pool_; std::vector counters_; diff --git a/test/server/guarddog_impl_test.cc b/test/server/guarddog_impl_test.cc index 9641c5afc2a4..d5ff1f4698b4 100644 --- a/test/server/guarddog_impl_test.cc +++ b/test/server/guarddog_impl_test.cc @@ -625,7 +625,19 @@ TEST_P(GuardDogActionsTest, MissShouldOnlyReportRelevantThreads) { // synchronize with the guard dog. guard_dog_->forceCheckForTest(); + if (GetParam() == TimeSystemType::Real) { + // Touch the second_dog in case we overslept in the real time system + // and the guard dog timer goes off. + second_dog_->touch(); + } + time_system_->advanceTimeWait(std::chrono::milliseconds(51)); + + if (GetParam() == TimeSystemType::Real) { + // Touch the second_dog in case we overslept in the real time system + // and the prior "touch" was consumed. + second_dog_->touch(); + } guard_dog_->forceCheckForTest(); EXPECT_THAT(events_, ElementsAre("MISS : 10")); @@ -687,7 +699,19 @@ TEST_P(GuardDogActionsTest, MegaMissShouldOnlyReportRelevantThreads) { // synchronize with the guard dog. guard_dog_->forceCheckForTest(); + if (GetParam() == TimeSystemType::Real) { + // Touch the second_dog in case we overslept in the real time system + // and the guard dog timer goes off. + second_dog_->touch(); + } + time_system_->advanceTimeWait(std::chrono::milliseconds(51)); + + if (GetParam() == TimeSystemType::Real) { + // Touch the second_dog in case we overslept in the real time system + // and the prior "touch" was consumed. 
+ second_dog_->touch(); + } guard_dog_->forceCheckForTest(); EXPECT_THAT(events_, ElementsAre("MEGAMISS : 10")); diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 1d5da4a8eeab..86121c09ccee 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -335,7 +335,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransportSocketConnectTimeout) { TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(*worker_, start(_)); EXPECT_FALSE(manager_->isWorkerStarted()); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Validate that there are no active listeners and workers are started. EXPECT_EQ(0, server_.stats_store_ .gauge("listener_manager.total_active_listeners", @@ -873,7 +873,7 @@ version_info: version1 )EOF"); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Now add new version listener foo after workers start, note it's fine that server_init_mgr is // initialized, as no target will be added to it. @@ -960,7 +960,7 @@ filter_chains: {} .RetiresOnSaturation(); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_create_success").value()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1102,7 +1102,7 @@ version_info: version2 // Start workers. EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Validate that workers_started stat is still zero before workers set the status via // completion callback. 
EXPECT_EQ(0, server_.stats_store_ @@ -1307,7 +1307,7 @@ TEST_F(ListenerManagerImplTest, UpdateActiveToWarmAndBack) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add and initialize foo listener. const std::string listener_foo_yaml = R"EOF( @@ -1368,7 +1368,7 @@ TEST_F(ListenerManagerImplTest, AddReusableDrainingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener directly into active. const std::string listener_foo_yaml = R"EOF( @@ -1428,7 +1428,7 @@ TEST_F(ListenerManagerImplTest, AddClosedDrainingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener directly into active. const std::string listener_foo_yaml = R"EOF( @@ -1481,7 +1481,7 @@ TEST_F(ListenerManagerImplTest, BindToPortEqualToFalse) { InSequence s; ProdListenerComponentFactory real_listener_factory(server_); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); const std::string listener_foo_yaml = R"EOF( name: foo address: @@ -1519,7 +1519,7 @@ TEST_F(ListenerManagerImplTest, ReusePortEqualToTrue) { InSequence s; ProdListenerComponentFactory real_listener_factory(server_); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); const std::string listener_foo_yaml = R"EOF( name: foo address: @@ -1574,7 +1574,7 @@ TEST_F(ListenerManagerImplTest, CantBindSocket) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); const std::string listener_foo_yaml = R"EOF( name: foo @@ -1627,7 +1627,7 @@ 
TEST_F(ListenerManagerImplTest, ConfigDumpWithExternalError) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Make sure the config dump is empty by default. ListenerManager::FailureStates empty_failure_state; @@ -1663,7 +1663,7 @@ TEST_F(ListenerManagerImplTest, ListenerDraining) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); const std::string listener_foo_yaml = R"EOF( name: foo @@ -1713,7 +1713,7 @@ TEST_F(ListenerManagerImplTest, RemoveListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Remove an unknown listener. EXPECT_FALSE(manager_->removeListener("unknown")); @@ -1795,7 +1795,7 @@ TEST_F(ListenerManagerImplTest, StopListeners) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener in inbound direction. const std::string listener_foo_yaml = R"EOF( @@ -1900,7 +1900,7 @@ TEST_F(ListenerManagerImplTest, StopAllListeners) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. const std::string listener_foo_yaml = R"EOF( @@ -1948,7 +1948,7 @@ TEST_F(ListenerManagerImplTest, StopWarmingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. 
const std::string listener_foo_yaml = R"EOF( @@ -2005,7 +2005,7 @@ TEST_F(ListenerManagerImplTest, AddListenerFailure) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into active. const std::string listener_foo_yaml = R"EOF( @@ -2042,7 +2042,7 @@ TEST_F(ListenerManagerImplTest, StaticListenerAddFailure) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into active. const std::string listener_foo_yaml = R"EOF( @@ -2096,7 +2096,7 @@ TEST_F(ListenerManagerImplTest, DuplicateAddressDontBind) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. const std::string listener_foo_yaml = R"EOF( @@ -4223,7 +4223,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, VerifyIgnoreExpirationWithCA) { TEST_F(ListenerManagerImplWithDispatcherStatsTest, DispatherStatsWithCorrectPrefix) { EXPECT_CALL(*worker_, start(_)); EXPECT_CALL(*worker_, initializeStats(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); } TEST_F(ListenerManagerImplWithRealFiltersTest, ApiListener) { @@ -4352,7 +4352,7 @@ TEST_F(ListenerManagerImplTest, StopInplaceWarmingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. const std::string listener_foo_yaml = R"EOF( @@ -4414,7 +4414,7 @@ TEST_F(ListenerManagerImplTest, RemoveInplaceUpdatingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. 
const std::string listener_foo_yaml = R"EOF( @@ -4483,7 +4483,7 @@ TEST_F(ListenerManagerImplTest, UpdateInplaceWarmingListener) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. const std::string listener_foo_yaml = R"EOF( @@ -4546,7 +4546,7 @@ TEST_F(ListenerManagerImplTest, DrainageDuringInplaceUpdate) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener into warming. const std::string listener_foo_yaml = R"EOF( @@ -4696,7 +4696,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); auto listener_proto = createDefaultListener(); @@ -4722,7 +4722,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, auto tls_inspector_injection_enabled_guard = enableTlsInspectorInjectionForThisTest(); EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); auto listener_proto = createDefaultListener(); @@ -4745,10 +4745,10 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, } TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, - TraditionalUpdateIfImplicitProxyProtocolChanges) { + DEPRECATED_FEATURE_TEST(TraditionalUpdateIfImplicitProxyProtocolChanges)) { EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); auto listener_proto = createDefaultListener(); @@ -4768,7 +4768,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, 
TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZeroFilterChain) { EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); auto listener_proto = createDefaultListener(); @@ -4792,7 +4792,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZe TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfListenerConfigHasUpdateOtherThanFilterChain) { EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); auto listener_proto = createDefaultListener(); @@ -4816,7 +4816,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TEST_F(ListenerManagerImplTest, RuntimeDisabledInPlaceUpdateFallbacksToTraditionalUpdate) { InSequence s; EXPECT_CALL(*worker_, start(_)); - manager_->startWorkers(guard_dog_); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); // Add foo listener. 
const std::string listener_foo_yaml = R"EOF( @@ -4951,6 +4951,14 @@ TEST_F(ListenerManagerImplTest, TcpBacklogCustomConfig) { EXPECT_EQ(100U, manager_->listeners().back().get().tcpBacklogSize()); } +TEST_F(ListenerManagerImplTest, WorkersStartedCallbackCalled) { + InSequence s; + + EXPECT_CALL(*worker_, start(_)); + EXPECT_CALL(callback_, Call()); + manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 01104f1729e4..68df5988d9b1 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -304,6 +304,7 @@ class ListenerManagerImplTest : public testing::Test { std::unique_ptr socket_; uint64_t listener_tag_{1}; bool enable_dispatcher_stats_{false}; + NiceMock> callback_; }; } // namespace Server diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 162126c25b3c..c09fb90ef732 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -181,6 +181,11 @@ class ServerInstanceImplTestBase { void initialize(const std::string& bootstrap_path) { initialize(bootstrap_path, false); } void initialize(const std::string& bootstrap_path, const bool use_intializing_instance) { + initialize(bootstrap_path, use_intializing_instance, hooks_); + } + + void initialize(const std::string& bootstrap_path, const bool use_intializing_instance, + ListenerHooks& hooks) { if (options_.config_path_.empty()) { options_.config_path_ = TestEnvironment::temporaryFileSubstitute( bootstrap_path, {{"upstream_0", 0}, {"upstream_1", 0}}, version_); @@ -194,7 +199,7 @@ class ServerInstanceImplTestBase { server_ = std::make_unique( *init_manager_, options_, time_system_, - std::make_shared("127.0.0.1"), hooks_, restart_, + std::make_shared("127.0.0.1"), hooks, restart_, stats_store_, fakelock_, component_factory_, std::make_unique>(), *thread_local_, 
Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), @@ -313,6 +318,18 @@ class CustomStatsSinkFactory : public Server::Configuration::StatsSinkFactory { std::string name() const override { return "envoy.custom_stats_sink"; } }; +// CustomListenerHooks is used for synchronization between test thread and server thread. +class CustomListenerHooks : public DefaultListenerHooks { +public: + CustomListenerHooks(std::function workers_started_cb) + : on_workers_started_cb_(workers_started_cb) {} + + void onWorkersStarted() override { on_workers_started_cb_(); } + +private: + std::function on_workers_started_cb_; +}; + INSTANTIATE_TEST_SUITE_P(IpVersions, ServerInstanceImplTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); @@ -423,6 +440,37 @@ TEST_P(ServerInstanceImplTest, LifecycleNotifications) { server_thread->join(); } +TEST_P(ServerInstanceImplTest, DrainParentListenerAfterWorkersStarted) { + bool workers_started = false; + absl::Notification workers_started_fired, workers_started_block; + // Expect drainParentListeners not to be called before workers start. + EXPECT_CALL(restart_, drainParentListeners).Times(0); + + // Run the server in a separate thread so we can test different lifecycle stages. 
+ auto server_thread = Thread::threadFactoryForTest().createThread([&] { + auto hooks = CustomListenerHooks([&]() { + workers_started = true; + workers_started_fired.Notify(); + workers_started_block.WaitForNotification(); + }); + initialize("test/server/test_data/server/node_bootstrap.yaml", false, hooks); + server_->run(); + server_ = nullptr; + thread_local_ = nullptr; + }); + + workers_started_fired.WaitForNotification(); + EXPECT_TRUE(workers_started); + EXPECT_TRUE(TestUtility::findGauge(stats_store_, "server.state")->used()); + EXPECT_EQ(0L, TestUtility::findGauge(stats_store_, "server.state")->value()); + + EXPECT_CALL(restart_, drainParentListeners); + workers_started_block.Notify(); + + server_->dispatcher().post([&] { server_->shutdown(); }); + server_thread->join(); +} + // A test target which never signals that it is ready. class NeverReadyTarget : public Init::TargetImpl { public: @@ -463,6 +511,31 @@ TEST_P(ServerInstanceImplTest, NoLifecycleNotificationOnEarlyShutdown) { server_thread->join(); } +TEST_P(ServerInstanceImplTest, ShutdownBeforeWorkersStarted) { + // Test that drainParentListeners() should never be called because we will shutdown + // early before the server starts worker threads. + EXPECT_CALL(restart_, drainParentListeners).Times(0); + + auto server_thread = Thread::threadFactoryForTest().createThread([&] { + initialize("test/server/test_data/server/node_bootstrap.yaml"); + + auto post_init_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::PostInit, + [&] { server_->shutdown(); }); + + // This shutdown notification should never be called because we will shutdown early. 
+ auto shutdown_handle = server_->registerCallback(ServerLifecycleNotifier::Stage::ShutdownExit, + [&](Event::PostCb) { FAIL(); }); + server_->run(); + + post_init_handle = nullptr; + shutdown_handle = nullptr; + server_ = nullptr; + thread_local_ = nullptr; + }); + + server_thread->join(); +} + TEST_P(ServerInstanceImplTest, V2ConfigOnly) { options_.service_cluster_name_ = "some_cluster_name"; options_.service_node_name_ = "some_node_name"; diff --git a/test/test_common/BUILD b/test/test_common/BUILD index 0ba5bf70ebc9..450477971d89 100644 --- a/test/test_common/BUILD +++ b/test/test_common/BUILD @@ -11,15 +11,6 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_basic_cc_library( - name = "printers_includes", - hdrs = ["printers.h"], - deps = [ - ":printers_lib", - "//include/envoy/network:address_interface", - ], -) - envoy_cc_test_library( name = "environment_lib", srcs = ["environment.cc"], @@ -87,10 +78,8 @@ envoy_cc_test_library( envoy_cc_library( name = "printers_lib", - srcs = [ - "printers.cc", - "printers.h", - ], + srcs = ["printers.cc"], + hdrs = ["printers.h"], deps = [ "//include/envoy/network:address_interface", "//source/common/buffer:buffer_lib", @@ -121,6 +110,7 @@ envoy_cc_test_library( ], deps = [ ":file_system_for_test_lib", + ":printers_lib", ":resources_lib", ":test_time_lib", ":thread_factory_for_test_lib", diff --git a/test/test_common/wasm_base.h b/test/test_common/wasm_base.h index 2cfc796084eb..99197e27d2e0 100644 --- a/test/test_common/wasm_base.h +++ b/test/test_common/wasm_base.h @@ -37,7 +37,7 @@ namespace Wasm { log_(static_cast(level), message); \ return proxy_wasm::WasmResult::Ok; \ } \ - MOCK_METHOD2(log_, void(spdlog::level::level_enum level, absl::string_view message)) + MOCK_METHOD(void, log_, (spdlog::level::level_enum level, absl::string_view message)) class DeferredRunner { public: diff --git a/test/tools/router_check/router.h b/test/tools/router_check/router.h index a566b0bb7224..337d54832abc 100644 --- 
a/test/tools/router_check/router.h +++ b/test/tools/router_check/router.h @@ -50,7 +50,7 @@ struct ToolConfig { private: ToolConfig(std::unique_ptr request_headers, std::unique_ptr response_headers, int random_value); - Stats::TestSymbolTable symbol_table_; + Stats::TestUtil::TestSymbolTable symbol_table_; }; /** diff --git a/tools/code_format/check_format.py b/tools/code_format/check_format.py index c7b68dba7cb9..b31515f4c67b 100755 --- a/tools/code_format/check_format.py +++ b/tools/code_format/check_format.py @@ -153,6 +153,7 @@ VERSION_HISTORY_SECTION_NAME = re.compile("^[A-Z][A-Za-z ]*$") RELOADABLE_FLAG_REGEX = re.compile(".*(..)(envoy.reloadable_features.[^ ]*)\s.*") INVALID_REFLINK = re.compile(".* ref:.*") +OLD_MOCK_METHOD_REGEX = re.compile("MOCK_METHOD\d") # Check for punctuation in a terminal ref clause, e.g. # :ref:`panic mode. ` REF_WITH_PUNCTUATION_REGEX = re.compile(".*\. <[^<]*>`\s*") @@ -773,6 +774,9 @@ def checkSourceLine(self, line, file_path, reportError): # Matches variants of TEST(), TEST_P(), TEST_F() etc. where the test name begins # with a lowercase letter. reportError("Test names should be CamelCase, starting with a capital letter") + if OLD_MOCK_METHOD_REGEX.search(line): + reportError("The MOCK_METHODn() macros should not be used, use MOCK_METHOD() instead") + if not self.allowlistedForSerializeAsString(file_path) and "SerializeAsString" in line: # The MessageLite::SerializeAsString doesn't generate deterministic serialization, # use MessageUtil::hash instead. 
diff --git a/tools/code_format/check_format_test_helper.py b/tools/code_format/check_format_test_helper.py index 25c8ec12c107..6b1b0092f44e 100755 --- a/tools/code_format/check_format_test_helper.py +++ b/tools/code_format/check_format_test_helper.py @@ -230,6 +230,7 @@ def runChecks(): "Don't use mangled Protobuf names for enum constants") errors += checkUnfixableError("test_naming.cc", "Test names should be CamelCase, starting with a capital letter") + errors += checkUnfixableError("mock_method_n.cc", "use MOCK_METHOD() instead") errors += checkUnfixableError( "test/register_factory.cc", "Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use " diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index f9a3cc2862f6..8d87a26db725 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -30,6 +30,7 @@ DSR HEXDIG HEXDIGIT OWS +Preconnecting STATNAME SkyWalking TIDs @@ -897,6 +898,10 @@ precompile precompiled precompute precomputed +preconnect +preconnected +preconnecting +preconnects predeclared prefetch prefetched diff --git a/tools/testdata/check_format/mock_method_n.cc b/tools/testdata/check_format/mock_method_n.cc new file mode 100644 index 000000000000..b783642c07ae --- /dev/null +++ b/tools/testdata/check_format/mock_method_n.cc @@ -0,0 +1,7 @@ +namespace Envoy { + +struct Class { + MOCK_METHOD1(name, void()); +}; + +} // namespace Envoy