diff --git a/docs/aggregations/bucket/parent/parent-aggregation-usage.asciidoc b/docs/aggregations/bucket/parent/parent-aggregation-usage.asciidoc
index 44c51475499..e96436e092d 100644
--- a/docs/aggregations/bucket/parent/parent-aggregation-usage.asciidoc
+++ b/docs/aggregations/bucket/parent/parent-aggregation-usage.asciidoc
@@ -47,7 +47,6 @@ new ParentAggregation("name_of_parent_agg", typeof(CommitActivity)) <1>
 }
 ----
 <1> `join` field is determined from the _child_ type. In this example, it is `CommitActivity`
-
 <2> sub-aggregations are on the type determined from the `join` field. In this example, a `Project` is a parent of `CommitActivity`
 
 [source,javascript]
diff --git a/docs/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc b/docs/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc
index b2dfbc849bf..42a2d319ff7 100644
--- a/docs/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc
+++ b/docs/aggregations/pipeline/derivative/derivative-aggregation-usage.asciidoc
@@ -23,7 +23,7 @@ a => a
 .DateHistogram("projects_started_per_month", dh => dh
     .Field(p => p.StartedOn)
     .Interval(DateInterval.Month)
-    .MinimumDocumentCount(1)
+    .MinimumDocumentCount(0)
     .Aggregations(aa => aa
         .Sum("commits", sm => sm
             .Field(p => p.NumberOfCommits)
@@ -43,7 +43,7 @@ new DateHistogramAggregation("projects_started_per_month")
 {
     Field = "startedOn",
     Interval = DateInterval.Month,
-    MinimumDocumentCount = 1,
+    MinimumDocumentCount = 0,
     Aggregations = new SumAggregation("commits", "numberOfCommits")
         && new DerivativeAggregation("commits_derivative", "commits")
 
@@ -58,7 +58,7 @@ new DateHistogramAggregation("projects_started_per_month")
   "date_histogram": {
     "field": "startedOn",
     "interval": "month",
-    "min_doc_count": 1
+    "min_doc_count": 0
   },
   "aggs": {
     "commits": {
diff --git a/docs/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc
index cb520dd74a3..7c23d8e769c 100644
--- a/docs/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc
+++ b/docs/aggregations/pipeline/moving-average/moving-average-ewma-aggregation-usage.asciidoc
@@ -23,7 +23,7 @@ a => a
 .DateHistogram("projects_started_per_month", dh => dh
     .Field(p => p.StartedOn)
     .Interval(DateInterval.Month)
-    .MinimumDocumentCount(1)
+    .MinimumDocumentCount(0)
     .Aggregations(aa => aa
         .Sum("commits", sm => sm
             .Field(p => p.NumberOfCommits)
@@ -48,7 +48,7 @@ new DateHistogramAggregation("projects_started_per_month")
 {
     Field = "startedOn",
     Interval = DateInterval.Month,
-    MinimumDocumentCount = 1,
+    MinimumDocumentCount = 0,
     Aggregations = new SumAggregation("commits", "numberOfCommits")
         && new MovingAverageAggregation("commits_moving_avg", "commits")
 
@@ -69,7 +69,7 @@ new DateHistogramAggregation("projects_started_per_month")
   "date_histogram": {
     "field": "startedOn",
     "interval": "month",
-    "min_doc_count": 1
+    "min_doc_count": 0
   },
   "aggs": {
     "commits": {
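For context on the change running through these files: switching the date histogram to `min_doc_count: 0` makes it emit empty buckets, so sibling pipeline aggregations such as `derivative` and `moving_avg` receive a contiguous, gapless series. A minimal sketch of the corrected request, not part of the diff, assuming the `ElasticClient` instance and `Project` POCO used throughout these docs:

[source,csharp]
----
var searchResponse = client.Search<Project>(s => s
    .Size(0) // only the aggregation buckets are needed, not the hits
    .Aggregations(a => a
        .DateHistogram("projects_started_per_month", dh => dh
            .Field(p => p.StartedOn)
            .Interval(DateInterval.Month)
            .MinimumDocumentCount(0) // emit empty buckets so the pipeline sees no gaps
            .Aggregations(aa => aa
                .Sum("commits", sm => sm.Field(p => p.NumberOfCommits))
                .Derivative("commits_derivative", d => d.BucketsPath("commits"))
            )
        )
    )
);
----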
.DateHistogram("projects_started_per_month", dh => dh .Field(p => p.StartedOn) .Interval(DateInterval.Month) - .MinimumDocumentCount(1) + .MinimumDocumentCount(0) .Aggregations(aa => aa .Sum("commits", sm => sm.Field(p => p.NumberOfCommits)) .MovingAverage("commits_moving_avg", mv => mv @@ -47,7 +47,7 @@ new DateHistogramAggregation("projects_started_per_month") { Field = "startedOn", Interval = DateInterval.Month, - MinimumDocumentCount = 1, + MinimumDocumentCount = 0, Aggregations = new SumAggregation("commits", "numberOfCommits") && new MovingAverageAggregation("commits_moving_avg", "commits") @@ -69,7 +69,7 @@ new DateHistogramAggregation("projects_started_per_month") "date_histogram": { "field": "startedOn", "interval": "month", - "min_doc_count": 1 + "min_doc_count": 0 }, "aggs": { "commits": { diff --git a/docs/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc index d293f5f9a39..9d33da71f3f 100644 --- a/docs/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc +++ b/docs/aggregations/pipeline/moving-average/moving-average-holt-winters-aggregation-usage.asciidoc @@ -23,7 +23,7 @@ a => a .DateHistogram("projects_started_per_month", dh => dh .Field(p => p.StartedOn) .Interval(DateInterval.Month) - .MinimumDocumentCount(1) + .MinimumDocumentCount(0) .Aggregations(aa => aa .Sum("commits", sm => sm .Field(p => p.NumberOfCommits) @@ -54,7 +54,7 @@ new DateHistogramAggregation("projects_started_per_month") { Field = "startedOn", Interval = DateInterval.Month, - MinimumDocumentCount = 1, + MinimumDocumentCount = 0, Aggregations = new SumAggregation("commits", "numberOfCommits") && new MovingAverageAggregation("commits_moving_avg", "commits") @@ -81,7 +81,7 @@ new DateHistogramAggregation("projects_started_per_month") "date_histogram": { "field": "startedOn", "interval": "month", - "min_doc_count": 1 + "min_doc_count": 0 }, "aggs": { "commits": { diff --git a/docs/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc index e33a1bc7c69..dfa49c37050 100644 --- a/docs/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc +++ b/docs/aggregations/pipeline/moving-average/moving-average-linear-aggregation-usage.asciidoc @@ -23,6 +23,7 @@ a => a .DateHistogram("projects_started_per_month", dh => dh .Field(p => p.StartedOn) .Interval(DateInterval.Month) + .MinimumDocumentCount(0) .Aggregations(aa => aa .Sum("commits", sm => sm .Field(p => p.NumberOfCommits) @@ -46,6 +47,7 @@ new DateHistogramAggregation("projects_started_per_month") { Field = "startedOn", Interval = DateInterval.Month, + MinimumDocumentCount = 0, Aggregations = new SumAggregation("commits", "numberOfCommits") && new MovingAverageAggregation("commits_moving_avg", "commits") @@ -63,7 +65,8 @@ new DateHistogramAggregation("projects_started_per_month") "projects_started_per_month": { "date_histogram": { "field": "startedOn", - "interval": "month" + "interval": "month", + "min_doc_count": 0 }, "aggs": { "commits": { diff --git a/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc index 05fa74c7dc5..564f53b56a4 100644 --- 
diff --git a/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc
index 05fa74c7dc5..564f53b56a4 100644
--- a/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc
+++ b/docs/aggregations/pipeline/moving-average/moving-average-simple-aggregation-usage.asciidoc
@@ -23,6 +23,7 @@ a => a
 .DateHistogram("projects_started_per_month", dh => dh
     .Field(p => p.StartedOn)
     .Interval(DateInterval.Month)
+    .MinimumDocumentCount(0)
     .Aggregations(aa => aa
         .Sum("commits", sm => sm
             .Field(p => p.NumberOfCommits)
@@ -48,6 +49,7 @@ new DateHistogramAggregation("projects_started_per_month")
 {
     Field = "startedOn",
     Interval = DateInterval.Month,
+    MinimumDocumentCount = 0,
     Aggregations = new SumAggregation("commits", "numberOfCommits")
         && new MovingAverageAggregation("commits_moving_avg", "commits")
 
@@ -67,7 +69,8 @@ new DateHistogramAggregation("projects_started_per_month")
   "projects_started_per_month": {
     "date_histogram": {
       "field": "startedOn",
-      "interval": "month"
+      "interval": "month",
+      "min_doc_count": 0
     },
     "aggs": {
       "commits": {
diff --git a/docs/aggregations/pipeline/moving-function/moving-function-aggregation-usage.asciidoc b/docs/aggregations/pipeline/moving-function/moving-function-aggregation-usage.asciidoc
index d0d4fee73f0..e048e80c561 100644
--- a/docs/aggregations/pipeline/moving-function/moving-function-aggregation-usage.asciidoc
+++ b/docs/aggregations/pipeline/moving-function/moving-function-aggregation-usage.asciidoc
@@ -33,6 +33,7 @@ a => a
 .DateHistogram("projects_started_per_month", dh => dh
     .Field(p => p.StartedOn)
     .Interval(DateInterval.Month)
+    .MinimumDocumentCount(0)
     .Aggregations(aa => aa
         .Sum("commits", sm => sm
             .Field(p => p.NumberOfCommits)
@@ -55,6 +56,7 @@ new DateHistogramAggregation("projects_started_per_month")
 {
     Field = "startedOn",
     Interval = DateInterval.Month,
+    MinimumDocumentCount = 0,
     Aggregations = new SumAggregation("commits", "numberOfCommits")
         && new MovingFunctionAggregation("commits_moving_avg", "commits")
 
@@ -73,7 +75,8 @@ new DateHistogramAggregation("projects_started_per_month")
   "projects_started_per_month": {
     "date_histogram": {
       "field": "startedOn",
-      "interval": "month"
+      "interval": "month",
+      "min_doc_count": 0
     },
     "aggs": {
       "commits": {
diff --git a/docs/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc b/docs/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc
index 6485254cd42..e44a781e55e 100644
--- a/docs/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc
+++ b/docs/aggregations/pipeline/serial-differencing/serial-differencing-aggregation-usage.asciidoc
@@ -23,7 +23,7 @@ a => a
 .DateHistogram("projects_started_per_month", dh => dh
     .Field(p => p.StartedOn)
     .Interval(DateInterval.Month)
-    .MinimumDocumentCount(1)
+    .MinimumDocumentCount(0)
     .Aggregations(aa => aa
         .Sum("commits", sm => sm
             .Field(p => p.NumberOfCommits)
@@ -44,7 +44,7 @@ new DateHistogramAggregation("projects_started_per_month")
 {
     Field = "startedOn",
     Interval = DateInterval.Month,
-    MinimumDocumentCount = 1,
+    MinimumDocumentCount = 0,
     Aggregations = new SumAggregation("commits", "numberOfCommits")
         && new SerialDifferencingAggregation("second_difference", "commits")
 
@@ -62,7 +62,7 @@ new DateHistogramAggregation("projects_started_per_month")
   "date_histogram": {
     "field": "startedOn",
     "interval": "month",
-    "min_doc_count": 1
+    "min_doc_count": 0
   },
   "aggs": {
     "commits": {
diff --git a/docs/aggregations/writing-aggregations.asciidoc b/docs/aggregations/writing-aggregations.asciidoc
index 4eccf9b646b..adf1cebe283 100644
--- a/docs/aggregations/writing-aggregations.asciidoc
+++ b/docs/aggregations/writing-aggregations.asciidoc
@@ -232,7 +232,6 @@ return s => s
 );
 ----
 <1> a list of aggregation functions to apply
-
 <2> Using LINQ's `Aggregate()` function to accumulate/apply all of the aggregation functions
 
 [[handling-aggregate-response]]
@@ -276,6 +275,5 @@ var maxPerChild = childAggregation.Max("max_per_child");
 maxPerChild.Should().NotBeNull(); <2>
 ----
 <1> Do something with the average per child. Here we just assert it's not null
-
 <2> Do something with the max per child. Here we just assert it's not null
 
diff --git a/docs/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc b/docs/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc
index f70d442b8bb..633528b3b3e 100644
--- a/docs/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc
+++ b/docs/client-concepts/connection-pooling/building-blocks/connection-pooling.asciidoc
@@ -97,7 +97,6 @@ var pool = new CloudConnectionPool(cloudId, credentials); <2>
 var client = new ElasticClient(new ConnectionSettings(pool));
 ----
 <1> a username and password that can access Elasticsearch service on Elastic Cloud
-
 <2> `cloudId` is a value that can be retrieved from the Elastic Cloud web console
 
 This type of pool, like its parent the `SingleNodeConnectionPool`, is hardwired to opt out of
diff --git a/docs/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc b/docs/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc
index 3b57634c369..829e77fb6fc 100644
--- a/docs/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc
+++ b/docs/client-concepts/connection-pooling/building-blocks/request-pipelines.asciidoc
@@ -26,7 +26,7 @@ the whole coordination of the request is deferred to a new instance in a `using`
 var pipeline = new RequestPipeline(
     settings,
     DateTimeProvider.Default,
-    new MemoryStreamFactory(),
+    new RecyclableMemoryStreamFactory(),
     new SearchRequestParameters());
 
 pipeline.GetType().Should().Implement();
@@ -41,7 +41,7 @@ var requestPipelineFactory = new RequestPipelineFactory();
 var requestPipeline = requestPipelineFactory.Create(
     settings,
     DateTimeProvider.Default, <1>
-    new MemoryStreamFactory(),
+    new RecyclableMemoryStreamFactory(),
     new SearchRequestParameters());
 
 requestPipeline.Should().BeOfType();
@@ -58,7 +58,7 @@ var transport = new Transport(
     settings,
     requestPipelineFactory,
     DateTimeProvider.Default,
-    new MemoryStreamFactory());
+    new RecyclableMemoryStreamFactory());
 
 var client = new ElasticClient(transport);
 ----
diff --git a/docs/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc b/docs/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc
index c765804e9db..07e2460bfd4 100644
--- a/docs/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc
+++ b/docs/client-concepts/connection-pooling/exceptions/unexpected-exceptions.asciidoc
@@ -58,11 +58,8 @@ audit = await audit.TraceUnexpectedException(
 );
 ----
 <1> set up a cluster with 10 nodes
-
 <2> where node 2 on port 9201 always throws an exception
-
 <3> The first call to 9200 returns a healthy response
-
 <4> ...but the second call, to 9201, returns a bad response
 
 Sometimes, an unexpected exception happens further down in the pipeline. In this scenario, we
@@ -101,9 +98,7 @@ audit = await audit.TraceUnexpectedException(
 );
 ----
 <1> calls on 9200 set up to throw a `HttpRequestException` or a `WebException`
-
 <2> calls on 9201 set up to throw an `Exception`
-
 <3> Assert that the audit trail for the client call includes the bad response from 9200 and 9201
 
 An unexpected hard exception on ping and sniff is something we *do* try to recover from and failover to retrying on the next node.
@@ -148,8 +143,6 @@ audit = await audit.TraceUnexpectedException(
 );
 ----
 <1> `InnerException` is the exception that brought the request down
-
 <2> The hard exception that happened on ping is still available though
-
 <3> An exception can be hard to relate back to a point in time, so the exception is also available on the audit trail
 
diff --git a/docs/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc b/docs/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc
index 2ca84ccd493..9659d53ea91 100644
--- a/docs/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc
+++ b/docs/client-concepts/connection-pooling/exceptions/unrecoverable-exceptions.asciidoc
@@ -81,7 +81,6 @@ var audit = new Auditor(() => VirtualClusterWith
 );
 ----
 <1> Always succeed on ping
-
 <2> ...but always fail on calls with a 401 Bad Authentication response
 
 Now, let's make a client call. We'll see that the first audit event is a successful ping
@@ -102,9 +101,7 @@ audit = await audit.TraceElasticsearchException(
 );
 ----
 <1> First call results in a successful ping
-
 <2> Second call results in a bad response
-
 <3> The reason for the bad response is Bad Authentication
 
 When a bad authentication response occurs, the client attempts to deserialize the response body returned;
@@ -138,7 +135,6 @@ audit = await audit.TraceElasticsearchException(
 );
 ----
 <1> Always return a 401 bad response with an HTML response on client calls
-
 <2> Assert that the response body bytes are null
 
 Now in this example, by turning on `DisableDirectStreaming()` on `ConnectionSettings`, we see the same behaviour exhibited
@@ -173,6 +169,5 @@ audit = await audit.TraceElasticsearchException(
 );
 ----
 <1> Response bytes are set on the response
-
 <2> Assert that the response contains `"nginx/"`
 
diff --git a/docs/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc b/docs/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc
index 478009877a3..ac0f906f7ee 100644
--- a/docs/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc
+++ b/docs/client-concepts/connection-pooling/max-retries/respects-max-retry.asciidoc
@@ -84,7 +84,6 @@ audit = await audit.TraceCall(
 );
 ----
 <1> Set the maximum number of retries to 3
-
 <2> The client call trace returns a `MaxRetriesReached` audit after the initial attempt and the number of retries allowed
 
 In our previous example we simulated very fast failures, but in the real world, a call might take upwards of a second.
diff --git a/docs/client-concepts/connection-pooling/pinging/first-usage.asciidoc b/docs/client-concepts/connection-pooling/pinging/first-usage.asciidoc
index d4a9109ccd7..11e0766c648 100644
--- a/docs/client-concepts/connection-pooling/pinging/first-usage.asciidoc
+++ b/docs/client-concepts/connection-pooling/pinging/first-usage.asciidoc
@@ -92,13 +92,9 @@ await audit.TraceCalls(
 );
 ----
 <1> The first call goes to 9200, which succeeds
-
 <2> The 2nd call does a ping on 9201 because it's used for the first time. This fails
-
 <3> So we ping 9202. This _also_ fails
-
 <4> We then ping 9203 because we haven't used it before and it succeeds
-
 <5> Finally, we assert that the connection pool has two nodes that are marked as dead
 
 All nodes are pinged on first use, provided they are healthy
@@ -125,6 +121,5 @@ await audit.TraceCalls(
 );
 ----
 <1> Pings on nodes always succeed
-
 <2> A successful ping on each node
 
diff --git a/docs/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc b/docs/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc
index 6f7878d70c5..33cfea2e03b 100644
--- a/docs/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc
+++ b/docs/client-concepts/connection-pooling/request-overrides/disable-sniff-ping-per-request.asciidoc
@@ -65,11 +65,8 @@ audit = await audit.TraceCalls(
 );
 ----
 <1> disable sniffing
-
 <2> first call is a successful ping
-
 <3> sniff on startup call happens here, on the second call
-
 <4> No sniff on startup again
 
 Now, let's disable pinging on the request
@@ -93,7 +90,6 @@ audit = await audit.TraceCall(
 );
 ----
 <1> disable ping
-
 <2> No ping after sniffing
 
 Finally, let's demonstrate disabling both sniff and ping on the request
@@ -115,6 +111,5 @@ audit = await audit.TraceCall(
 );
 ----
 <1> disable ping and sniff
-
 <2> no ping or sniff before the call
 
diff --git a/docs/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc b/docs/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc
index ae22b1256dd..205e49e8ef0 100644
--- a/docs/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc
+++ b/docs/client-concepts/connection-pooling/round-robin/skip-dead-nodes.asciidoc
@@ -140,9 +140,7 @@ await audit.TraceCalls(
 );
 ----
 <1> The first call goes to 9200 which succeeds
-
 <2> The 2nd call does a ping on 9201 because it's used for the first time. It fails, so we wrap over to node 9202
-
 <3> The next call goes to 9203 which fails, so we should wrap over
 
 A cluster with 2 nodes where the second node fails on ping
@@ -193,6 +191,5 @@ await audit.TraceCalls(
 );
 ----
 <1> All the calls fail
-
 <2> After all our registered nodes are marked dead, we want to sample a single dead node each time to quickly see if the cluster is back up. We do not want to retry all 4 nodes
 
diff --git a/docs/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc b/docs/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc
index e98fa98535c..16111f5d7d5 100644
--- a/docs/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc
+++ b/docs/client-concepts/connection-pooling/sniffing/on-connection-failure.asciidoc
@@ -79,13 +79,9 @@ audit = await audit.TraceCalls(
 );
 ----
 <1> When the call fails on 9201, the following sniff succeeds and returns a new cluster state of healthy nodes. This cluster only has 3 nodes and the known masters are 9200 and 9202. A search on 9201 is set up to still fail once
-
 <2> After this second failure on 9201, another sniff will happen which returns a cluster state that no longer fails but looks completely different; it's now three nodes on ports 9210 - 9212, with 9210 and 9212 being master eligible.
-
 <3> We assert we do a sniff on our first known master node 9202 after the failed call on 9201
-
 <4> Our pool should now have three nodes
-
 <5> We assert we do a sniff on the first master node in our updated cluster
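The audits above exercise sniffing behaviour that is driven by connection settings. For background, a minimal sketch of enabling the same behaviour outside the virtual cluster test rig, assuming a real `SniffingConnectionPool`:

[source,csharp]
----
var pool = new SniffingConnectionPool(new[] { new Uri("http://localhost:9200") });

var settings = new ConnectionSettings(pool)
    .SniffOnStartup()          // sniff the cluster state before the first call
    .SniffOnConnectionFault(); // and re-sniff whenever a node fails

var client = new ElasticClient(settings);
----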
 
 ==== Sniffing after ping failure
@@ -151,11 +147,8 @@ audit = await audit.TraceCalls(
 );
 ----
 <1> We assert we do a sniff on our first known master node 9202
-
 <2> Our pool should now have three nodes
-
 <3> We assert we do a sniff on the first master node in our updated cluster
-
 <4> 9210 was already pinged after the sniff returned the new nodes
 
 ==== Client uses publish address
diff --git a/docs/client-concepts/connection-pooling/sniffing/on-startup.asciidoc b/docs/client-concepts/connection-pooling/sniffing/on-startup.asciidoc
index bb5d9de64e8..795c564978f 100644
--- a/docs/client-concepts/connection-pooling/sniffing/on-startup.asciidoc
+++ b/docs/client-concepts/connection-pooling/sniffing/on-startup.asciidoc
@@ -120,7 +120,6 @@ await audit.TraceCall(new ClientCall {
 });
 ----
 <1> Sniffing returns 8 nodes, starting from 9204
-
 <2> After successfully sniffing, the ping now happens on 9204
 
 ==== Prefers master eligible nodes
diff --git a/docs/client-concepts/connection-pooling/sniffing/role-detection.asciidoc b/docs/client-concepts/connection-pooling/sniffing/role-detection.asciidoc
index 66cc280e5eb..643c5fab767 100644
--- a/docs/client-concepts/connection-pooling/sniffing/role-detection.asciidoc
+++ b/docs/client-concepts/connection-pooling/sniffing/role-detection.asciidoc
@@ -138,7 +138,6 @@ var audit = new Auditor(() => VirtualClusterWith
 };
 ----
 <1> Before the sniff, assert we only see three master only nodes
-
 <2> After the sniff, assert we now know about the existence of 20 nodes.
 
 After the sniff has happened on 9200 before the first API call, assert that the subsequent API
@@ -219,9 +218,7 @@ var audit = new Auditor(() => VirtualClusterWith
 };
 ----
 <1> for testing simplicity, disable pings
-
 <2> We only want to execute API calls to nodes in rack_one
-
 <3> After sniffing on startup, assert that the pool of nodes that the client will execute API calls against only contains the three nodes that are in `rack_one`
 
 With the cluster set up, assert that the sniff happens on 9200 before the first API call
@@ -298,8 +295,6 @@ await audit.TraceUnexpectedElasticsearchException(new ClientCall
 });
 ----
 <1> The audit trail indicates a sniff for the very first time on startup
-
 <2> The sniff succeeds because the node predicate is ignored when sniffing
-
 <3> when trying to do an actual API call however, the predicate prevents any nodes from being attempted
 
diff --git a/docs/client-concepts/high-level/analysis/writing-analyzers.asciidoc b/docs/client-concepts/high-level/analysis/writing-analyzers.asciidoc
index 23c3c663d43..65a12683aa1 100644
--- a/docs/client-concepts/high-level/analysis/writing-analyzers.asciidoc
+++ b/docs/client-concepts/high-level/analysis/writing-analyzers.asciidoc
@@ -100,7 +100,6 @@ var createIndexResponse = _client.Indices.Create("my-index", c => c
 );
 ----
 <1> Pre-defined list of English stopwords within Elasticsearch
-
 <2> Use the `standard_english` analyzer configured
 
 [source,javascript]
@@ -262,7 +261,6 @@ var createIndexResponse = _client.Indices.Create("questions", c => c
 );
 ----
 <1> Use an analyzer at index time that strips HTML tags
-
 <2> Use an analyzer at search time that does not strip HTML tags
 
 With this in place, the text of a question body will be analyzed with the `index_question` analyzer
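The `questions` example above relies on separate index-time and search-time analyzers. A minimal sketch of wiring them onto a mapping — the `Question` POCO and the `search_question` analyzer name are illustrative assumptions, not part of the diff:

[source,csharp]
----
public class Question
{
    public string Body { get; set; }
}

var createIndexResponse = client.Indices.Create("questions", c => c
    .Map<Question>(m => m
        .Properties(p => p
            .Text(t => t
                .Name(q => q.Body)
                .Analyzer("index_question")        // strips HTML tags at index time
                .SearchAnalyzer("search_question") // leaves query input untouched
            )
        )
    )
);
----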
diff --git a/docs/client-concepts/high-level/getting-started.asciidoc b/docs/client-concepts/high-level/getting-started.asciidoc
index 9cb0d71bba6..5a14a0f8e00 100644
--- a/docs/client-concepts/high-level/getting-started.asciidoc
+++ b/docs/client-concepts/high-level/getting-started.asciidoc
@@ -107,7 +107,6 @@ var indexResponse = client.IndexDocument(person); <1>
 var asyncIndexResponse = await client.IndexDocumentAsync(person); <2>
 ----
 <1> synchronous method that returns an `IndexResponse`
-
 <2> asynchronous method that returns a `Task` that can be awaited
 
 NOTE: All methods available within NEST are exposed as both synchronous and asynchronous versions,
diff --git a/docs/client-concepts/high-level/indexing/indexing-documents.asciidoc b/docs/client-concepts/high-level/indexing/indexing-documents.asciidoc
index 7d02a5d3223..00706955bbc 100644
--- a/docs/client-concepts/high-level/indexing/indexing-documents.asciidoc
+++ b/docs/client-concepts/high-level/indexing/indexing-documents.asciidoc
@@ -40,7 +40,6 @@ if (!indexResponse.IsValid)
 var indexResponseAsync = await client.IndexDocumentAsync(person); <2>
 ----
 <1> synchronous method that returns an IIndexResponse
-
 <2> asynchronous method that returns a Task that can be awaited
 
 ==== Single documents with parameters
@@ -62,7 +61,6 @@ client.Index(person, i => i.Index("people")); <1>
 client.Index(new IndexRequest(person, "people")); <2>
 ----
 <1> fluent syntax
-
 <2> object initializer syntax
 
 ==== Multiple documents with `IndexMany`
@@ -113,11 +111,8 @@ if (indexManyResponse.Errors) <2>
 var indexManyAsyncResponse = await client.IndexManyAsync(people); <4>
 ----
 <1> synchronous method that returns an IBulkResponse
-
 <2> the response can be inspected to see if any of the bulk operations resulted in an error
-
 <3> If there are errors, they can be enumerated and inspected
-
 <4> asynchronous method that returns a Task that can be awaited
 
 ==== Multiple documents with bulk
@@ -141,7 +136,6 @@ var asyncBulkIndexResponse = await client.BulkAsync(b => b
     .IndexMany(people)); <2>
 ----
 <1> synchronous method that returns an IBulkResponse, the same as IndexMany and can be inspected in the same way for errors
-
 <2> asynchronous method that returns a Task that can be awaited
 
 ==== Multiple documents with `BulkAllObservable` helper
@@ -173,11 +167,8 @@ var bulkAllObservable = client.BulkAll(people, b => b
 });
 ----
 <1> how long to wait between retries
-
 <2> how many retries are attempted if a failure occurs
-
 <3> items per bulk request
-
 <4> perform the indexing and wait up to 15 minutes; whilst the `BulkAll` calls are asynchronous, this is a blocking operation
 
 ==== Advanced bulk indexing
@@ -213,10 +204,7 @@ client.BulkAll(people, b => b
 }));
 ----
 <1> customise the individual operations in the bulk request before it is dispatched
-
 <2> Index each document into either even-index or odd-index
-
 <3> decide if a document should be retried in the event of a failure
-
 <4> if a document cannot be indexed this delegate is called
 
diff --git a/docs/client-concepts/high-level/indexing/ingest-nodes.asciidoc b/docs/client-concepts/high-level/indexing/ingest-nodes.asciidoc
index 18115a87a07..aa21a86dad7 100644
--- a/docs/client-concepts/high-level/indexing/ingest-nodes.asciidoc
+++ b/docs/client-concepts/high-level/indexing/ingest-nodes.asciidoc
@@ -57,6 +57,5 @@ var settings = new ConnectionSettings(pool).NodePredicate(n => n.IngestEnabled);
 var indexingClient = new ElasticClient(settings);
 ----
 <1> list of cluster nodes
-
 <2> predicate to select only nodes with ingest capabilities
 
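The `BulkAllObservable` helper covered above is consumed by blocking on the observable. A minimal sketch, assuming the `people` collection from the surrounding examples:

[source,csharp]
----
var bulkAllObservable = client.BulkAll(people, b => b
    .Index("people")
    .BackOffTime("30s") // how long to wait between retries
    .BackOffRetries(2)  // how many retries before a batch is considered failed
    .Size(1000)         // items per bulk request
);

// blocks until all batches are acknowledged, or throws if retries are exhausted
bulkAllObservable.Wait(TimeSpan.FromMinutes(15), response =>
{
    // invoked for each successful bulk response as indexing progresses
});
----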
diff --git a/docs/client-concepts/high-level/indexing/pipelines.asciidoc b/docs/client-concepts/high-level/indexing/pipelines.asciidoc
index e9b0a0d9f99..5691e27f468 100644
--- a/docs/client-concepts/high-level/indexing/pipelines.asciidoc
+++ b/docs/client-concepts/high-level/indexing/pipelines.asciidoc
@@ -92,19 +92,12 @@ var person = new Person
 var indexResponse = client.Index(person, p => p.Index("people").Pipeline("person-pipeline")); <8>
 ----
 <1> automatically create the mapping from the type
-
 <2> create an additional field to store the initials
-
 <3> map field as IP Address type
-
 <4> map GeoIp as object
-
 <5> uppercase the lastname
-
 <6> use a painless script to populate the new field
-
 <7> use ingest-geoip plugin to enrich the GeoIp object from the supplied IP Address
-
 <8> index the document using the created pipeline
 
 ==== Increasing timeouts
@@ -129,6 +122,5 @@ client.Bulk(b => b
 );
 ----
 <1> increases the server-side bulk timeout
-
 <2> increases the HTTP request timeout
 
diff --git a/docs/client-concepts/high-level/inference/field-inference.asciidoc b/docs/client-concepts/high-level/inference/field-inference.asciidoc
index 9c9eb585107..d4cf5986062 100644
--- a/docs/client-concepts/high-level/inference/field-inference.asciidoc
+++ b/docs/client-concepts/high-level/inference/field-inference.asciidoc
@@ -513,15 +513,10 @@ private class Precedence
 }
 ----
 <1> Even though this property has various attributes applied, we provide an override on ConnectionSettings later that takes precedence.
-
 <2> Has a `TextAttribute`, `PropertyNameAttribute` and a `JsonPropertyAttribute` - the `TextAttribute` takes precedence.
-
 <3> Has both a `PropertyNameAttribute` and a `JsonPropertyAttribute` - the `PropertyNameAttribute` takes precedence.
-
 <4> `JsonPropertyAttribute` takes precedence.
-
 <5> We are going to hard code this property in our custom serializer to resolve to `ask`.
-
 <6> We are going to register a DefaultFieldNameInferrer on ConnectionSettings that will uppercase all properties.
 
 We'll create a custom `IPropertyMappingProvider` that renames any property named `AskSerializer` to `ask`.
@@ -567,9 +562,7 @@ usingSettings.Expect("data").ForField(Field(p => p.DataMember));
 usingSettings.Expect("DEFAULTFIELDNAMEINFERRER").ForField(Field(p => p.DefaultFieldNameInferrer));
 ----
 <1> Rename on the mapping for the `Precedence` type
-
 <2> Default inference for a field, if no other rules apply or are specified for a given field
-
 <3> Hook up the custom `IPropertyMappingProvider`
 
 The same naming rules also apply when indexing a document
diff --git a/docs/client-concepts/high-level/inference/index-name-inference.asciidoc b/docs/client-concepts/high-level/inference/index-name-inference.asciidoc
index 39b02f86a52..ce5c0098c8a 100644
--- a/docs/client-concepts/high-level/inference/index-name-inference.asciidoc
+++ b/docs/client-concepts/high-level/inference/index-name-inference.asciidoc
@@ -85,7 +85,6 @@ var client = new ElasticClient(settings);
 var projectSearchResponse = client.Search();
 ----
 <1> a default index to use, when no other index can be inferred
-
 <2> an index to use when `Project` is the target POCO type
 
 will send a search request to the API endpoint
diff --git a/docs/client-concepts/high-level/inference/indices-paths.asciidoc b/docs/client-concepts/high-level/inference/indices-paths.asciidoc
index 23917830b8c..fbd3559e554 100644
--- a/docs/client-concepts/high-level/inference/indices-paths.asciidoc
+++ b/docs/client-concepts/high-level/inference/indices-paths.asciidoc
@@ -88,7 +88,6 @@ singleIndexFromIndexName.Match(
 );
 ----
 <1> `_all` will override any specific index names here
-
 <2> The `Project` type has been mapped to a specific index name using <`>>
 
 [[nest-indices]]
@@ -121,9 +120,7 @@ ISearchRequest singleTypedRequest = new SearchDescriptor().Index(single
 var invalidSingleString = Index("name1, name2"); <3>
 ----
 <1> specifying a single index using a string
-
 <2> specifying a single index using a type
-
 <3> an **invalid** single index name
 
 ===== Multiple indices
@@ -146,9 +143,7 @@ manyStringRequest = new SearchDescriptor().Index(new[] { "name1", "name2" });
 ((IUrlParameter)manyStringRequest.Index).GetString(this.Client.ConnectionSettings).Should().Be("name1,name2");
 ----
 <1> specifying multiple indices using strings
-
 <2> specifying multiple indices using types
-
 <3> The index names here come from the Connection Settings passed to `TestClient`. See the documentation on <> for more details.
 
 ===== All Indices
diff --git a/docs/client-concepts/high-level/mapping/auto-map.asciidoc b/docs/client-concepts/high-level/mapping/auto-map.asciidoc
index 543e3c65876..3680833597f 100644
--- a/docs/client-concepts/high-level/mapping/auto-map.asciidoc
+++ b/docs/client-concepts/high-level/mapping/auto-map.asciidoc
@@ -62,7 +62,6 @@ var createIndexResponse = _client.Indices.Create("myindex", c => c
 );
 ----
 <1> Auto map `Company` using the generic method
-
 <2> Auto map `Employee` using the non-generic method
 
 This produces the following JSON request
diff --git a/docs/client-concepts/high-level/mapping/fluent-mapping.asciidoc b/docs/client-concepts/high-level/mapping/fluent-mapping.asciidoc
index 67ad58d9e9a..826b6c2f15c 100644
--- a/docs/client-concepts/high-level/mapping/fluent-mapping.asciidoc
+++ b/docs/client-concepts/high-level/mapping/fluent-mapping.asciidoc
@@ -273,11 +273,8 @@ var createIndexResponse = _client.Indices.Create("myindex", c => c
 );
 ----
 <1> Automap company
-
 <2> Override company inferred mappings
-
 <3> Automap nested employee type
-
 <4> Override employee inferred mappings
 
 [source,javascript]
diff --git a/docs/client-concepts/high-level/mapping/ignoring-properties.asciidoc b/docs/client-concepts/high-level/mapping/ignoring-properties.asciidoc
index c33c4a12e66..2fd0f9a3621 100644
--- a/docs/client-concepts/high-level/mapping/ignoring-properties.asciidoc
+++ b/docs/client-concepts/high-level/mapping/ignoring-properties.asciidoc
@@ -73,7 +73,6 @@ var createIndexResponse = client.Indices.Create("myindex", c => c
 );
 ----
 <1> we're using an in-memory connection, but in your application, you'll want to use an `IConnection` that actually sends a request.
-
 <2> we disable direct streaming here to capture the request and response bytes. In a production application, you would likely not call this as it adds overhead to each call.
 
 The JSON output for the mapping does not contain the ignored properties
@@ -185,7 +184,6 @@ var createIndexResponse = client.Indices.Create("myindex", c => c
 );
 ----
 <1> we're using an _in memory_ connection for this example. In your production application though, you'll want to use an `IConnection` that actually sends a request.
-
 <2> we disable direct streaming here to capture the request and response bytes. In your production application however, you'll likely not want to do this, since it causes the request and response bytes to be buffered in memory.
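The callouts above concern how the examples capture bytes; the subject of the file itself is ignoring properties. A minimal sketch of the two common approaches, where the `Company` shape is an illustrative assumption:

[source,csharp]
----
public class Company
{
    public string Name { get; set; }

    [Ignore] // NEST attribute: excluded from serialization and mapping
    public string Cache { get; set; }
}

var settings = new ConnectionSettings(new Uri("http://localhost:9200"))
    .DefaultMappingFor<Company>(m => m
        .Ignore(c => c.Name) // ignore via connection settings, without touching the POCO
    );
----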
 
 [source,javascript]
diff --git a/docs/client-concepts/high-level/mapping/multi-fields.asciidoc b/docs/client-concepts/high-level/mapping/multi-fields.asciidoc
index 305c1dfde04..08fa8aa1642 100644
--- a/docs/client-concepts/high-level/mapping/multi-fields.asciidoc
+++ b/docs/client-concepts/high-level/mapping/multi-fields.asciidoc
@@ -157,9 +157,7 @@ var createIndexResponse = _client.Indices.Create("myindex", c => c
 );
 ----
 <1> Use the stop analyzer on this sub field
-
 <2> Use a custom analyzer named "named_shingles" that is configured in the index
-
 <3> Index as not analyzed
 
 [source,javascript]
diff --git a/docs/client-concepts/high-level/mapping/parent-child-relationships.asciidoc b/docs/client-concepts/high-level/mapping/parent-child-relationships.asciidoc
index daef48c0c04..5ecc5ad3758 100644
--- a/docs/client-concepts/high-level/mapping/parent-child-relationships.asciidoc
+++ b/docs/client-concepts/high-level/mapping/parent-child-relationships.asciidoc
@@ -98,11 +98,8 @@ var createIndexResponse = client.Indices.Create("index", c => c
 );
 ----
 <1> recommended to make the routing field mandatory so you cannot accidentally forget
-
 <2> Map all of the `MyParent` properties
-
 <3> Map all of the `MyChild` properties
-
 <4> Additionally map the `JoinField` since it is not automatically mapped by `AutoMap()`
 
 We call `AutoMap()` for both types to discover properties of both .NET types. `AutoMap()` won't automatically setup the
@@ -176,7 +173,6 @@ parentDocument = new MyParent
 var indexParent = client.IndexDocument(parentDocument);
 ----
 <1> this lets the join data type know this is a root document of type `myparent`
-
 <2> this lets the join data type know this is a root document of type `myparent`
 
 [source,javascript]
diff --git a/docs/client-concepts/high-level/mapping/visitor-pattern-mapping.asciidoc b/docs/client-concepts/high-level/mapping/visitor-pattern-mapping.asciidoc
index cf08840833a..b26afdb70d4 100644
--- a/docs/client-concepts/high-level/mapping/visitor-pattern-mapping.asciidoc
+++ b/docs/client-concepts/high-level/mapping/visitor-pattern-mapping.asciidoc
@@ -65,7 +65,6 @@ public class DisableDocValuesPropertyVisitor : NoopPropertyVisitor
 }
 ----
 <1> Override the `Visit` method on `INumberProperty` and set `DocValues = false`
-
 <2> Similarly, override the `Visit` method on `IBooleanProperty` and set `DocValues = false`
 
 Now we can pass an instance of our custom visitor to `.AutoMap()`
diff --git a/docs/client-concepts/low-level/getting-started.asciidoc b/docs/client-concepts/low-level/getting-started.asciidoc
index d48d044ce9a..c2e1a23de6a 100644
--- a/docs/client-concepts/low-level/getting-started.asciidoc
+++ b/docs/client-concepts/low-level/getting-started.asciidoc
@@ -106,7 +106,6 @@ var asyncIndexResponse = await lowlevelClient.IndexAsync("people
 string responseString = asyncIndexResponse.Body;
 ----
 <1> synchronous method that returns an `IndexResponse`
-
 <2> asynchronous method that returns a `Task` that can be awaited
 
 NOTE: All available methods within Elasticsearch.Net are exposed as both synchronous and asynchronous versions,
@@ -243,9 +242,7 @@ var successOrKnownError = searchResponse.SuccessOrKnownError; <2>
 var exception = searchResponse.OriginalException; <3>
 ----
 <1> Response is in the 200 range, or an expected response for the given request
-
 <2> Response is successful, or has a response code between 400-599 that indicates the request cannot be retried.
-
 <3> If the response is unsuccessful, this will hold the original exception.
 
 Using these details, it is possible to make decisions around what should be done in your application.
diff --git a/docs/client-concepts/troubleshooting/diagnostic-source.asciidoc b/docs/client-concepts/troubleshooting/diagnostic-source.asciidoc
index a71e520d177..5f8839113cb 100644
--- a/docs/client-concepts/troubleshooting/diagnostic-source.asciidoc
+++ b/docs/client-concepts/troubleshooting/diagnostic-source.asciidoc
@@ -124,8 +124,6 @@ using (var subscription = DiagnosticListener.AllListeners.Subscribe(listenerObserver))
 }
 ----
 <1> use a sniffing connection pool that sniffs on startup and pings before first usage, so our diagnostics will emit most topics.
-
 <2> make a search API call
-
 <3> verify that the listener is picking up events
 
diff --git a/docs/client-concepts/troubleshooting/logging-with-on-request-completed.asciidoc b/docs/client-concepts/troubleshooting/logging-with-on-request-completed.asciidoc
index 6bbb46bcf12..e6a4f10cf83 100644
--- a/docs/client-concepts/troubleshooting/logging-with-on-request-completed.asciidoc
+++ b/docs/client-concepts/troubleshooting/logging-with-on-request-completed.asciidoc
@@ -37,9 +37,7 @@ await client.RootNodeInfoAsync(); <3>
 counter.Should().Be(2);
 ----
 <1> Construct a client
-
 <2> Make a synchronous call and assert the counter is incremented
-
 <3> Make an asynchronous call and assert the counter is incremented
 
 `OnRequestCompleted` is called even when an exception is thrown, so it can be used even if the client is
@@ -63,9 +61,7 @@ await Assert.ThrowsAsync(async () => await client.
 counter.Should().Be(2);
 ----
 <1> Configure a client with a connection that **always returns an HTTP 500 response**
-
 <2> Always throw exceptions when a call results in an exception
-
 <3> Assert an exception is thrown and the counter is incremented
 
 Here's an example using `OnRequestCompleted()` for more complex logging
@@ -144,15 +140,10 @@ list.Should().BeEquivalentTo(new[] <6>
 });
 ----
 <1> Here we use `InMemoryConnection` but in a real application, you'd use an `IConnection` that _actually_ sends the request, such as `HttpConnection`
-
 <2> Disable direct streaming so we can capture the request and response bytes
-
 <3> Perform some action when a request completes. Here, we're just adding to a list, but in your application you may be logging to a file.
-
 <4> Make a synchronous call
-
 <5> Make an asynchronous call
-
 <6> Assert the list contains the contents written in the delegate passed to `OnRequestCompleted`
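A minimal sketch of the logging pattern these callouts describe, writing to the console rather than a list, and assuming `System.Text.Encoding` for the byte decoding:

[source,csharp]
----
var settings = new ConnectionSettings(new Uri("http://localhost:9200"))
    .DisableDirectStreaming() // required for the request/response bytes to be captured
    .OnRequestCompleted(apiCallDetails =>
    {
        if (apiCallDetails.RequestBodyInBytes != null)
            Console.WriteLine(Encoding.UTF8.GetString(apiCallDetails.RequestBodyInBytes));

        if (apiCallDetails.ResponseBodyInBytes != null)
            Console.WriteLine(Encoding.UTF8.GetString(apiCallDetails.ResponseBodyInBytes));
    });

var client = new ElasticClient(settings);
----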
 
 When running an application in production, you probably don't want to disable direct streaming for _all_
@@ -230,10 +221,7 @@ list.Should().BeEquivalentTo(new[]
 });
 ----
 <1> Make a synchronous call where the request and response bytes will not be buffered
-
 <2> Make an asynchronous call where `DisableDirectStreaming()` is enabled
-
 <3> Only the method and url for the first request are captured
-
 <4> the body of the second request is captured
 
diff --git a/docs/query-dsl.asciidoc b/docs/query-dsl.asciidoc
index cb77994271c..dbd4bf2de7f 100644
--- a/docs/query-dsl.asciidoc
+++ b/docs/query-dsl.asciidoc
@@ -49,13 +49,13 @@ NEST exposes all of the full text queries available in Elasticsearch
 
 * <>
 
-* <>
-
 * <>
 
+* <>
+
 * <>
 
-* <>
+* <>
 
 * <>
 
@@ -71,13 +71,13 @@ include::query-dsl/full-text/common-terms/common-terms-usage.asciidoc[]
 
 include::query-dsl/full-text/intervals/intervals-usage.asciidoc[]
 
-include::query-dsl/full-text/match/match-usage.asciidoc[]
-
 include::query-dsl/full-text/match-bool-prefix/match-bool-prefix-usage.asciidoc[]
 
+include::query-dsl/full-text/match-phrase-prefix/match-phrase-prefix-usage.asciidoc[]
+
 include::query-dsl/full-text/match-phrase/match-phrase-usage.asciidoc[]
 
-include::query-dsl/full-text/match-phrase-prefix/match-phrase-prefix-usage.asciidoc[]
+include::query-dsl/full-text/match/match-usage.asciidoc[]
 
 include::query-dsl/full-text/multi-match/multi-match-usage.asciidoc[]
 
@@ -122,14 +122,14 @@ NEST exposes all of the term queries available in Elasticsearch
 
 * <>
 
+* <>
+
 * <>
 
 * <>
 
 * <>
 
-* <>
-
 * <>
 
 See the Elasticsearch documentation on {ref_current}/term-level-queries.html[Term level queries] for more details.
@@ -160,14 +160,14 @@ include::query-dsl/term-level/regexp/regexp-query-usage.asciidoc[]
 
 include::query-dsl/term-level/term/term-query-usage.asciidoc[]
 
+include::query-dsl/term-level/terms-set/terms-set-query-usage.asciidoc[]
+
 include::query-dsl/term-level/terms/terms-list-query-usage.asciidoc[]
 
 include::query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc[]
 
 include::query-dsl/term-level/terms/terms-query-usage.asciidoc[]
 
-include::query-dsl/term-level/terms-set/terms-set-query-usage.asciidoc[]
-
 include::query-dsl/term-level/wildcard/wildcard-query-usage.asciidoc[]
 
 [[compound-queries]]
@@ -283,10 +283,10 @@ Specialized types of queries that do not fit into other groups
 
 * <>
 
-* <>
-
 * <>
 
+* <>
+
 * <>
 
 See the Elasticsearch documentation on {ref_current}/specialized-queries.html[Specialized queries] for more details.
@@ -305,10 +305,10 @@ include::query-dsl/specialized/pinned/pinned-query-usage.asciidoc[]
 
 include::query-dsl/specialized/rank-feature/rank-feature-query-usage.asciidoc[]
 
-include::query-dsl/specialized/script/script-query-usage.asciidoc[]
-
 include::query-dsl/specialized/script-score/script-score-query-usage.asciidoc[]
 
+include::query-dsl/specialized/script/script-query-usage.asciidoc[]
+
 include::query-dsl/specialized/shape/shape-query-usage.asciidoc[]
 
 [[span-queries]]
diff --git a/docs/query-dsl/bool-dsl/bool-dsl.asciidoc b/docs/query-dsl/bool-dsl/bool-dsl.asciidoc
index d56321da4d4..2b53e902b9d 100644
--- a/docs/query-dsl/bool-dsl/bool-dsl.asciidoc
+++ b/docs/query-dsl/bool-dsl/bool-dsl.asciidoc
@@ -207,9 +207,7 @@ Assert(
 );
 ----
 <1> three queries `&&` together using the Fluent API
-
 <2> three queries `&&` together using Object Initializer syntax
-
 <3> assert the resulting `bool` query in each case has 3 `must` clauses
 
 [[unary-negation-operator]]
@@ -269,9 +267,7 @@ Assert(
 c => c.Bool.MustNot.Should().HaveCount(2)); <3>
 ----
 <1> two queries with `!` operator applied, `&&` together using the Fluent API
-
 <2> two queries with `!` operator applied, `&&` together using the Object Initializer syntax
-
 <3> assert the resulting `bool` query in each case has two `must_not` clauses
 
 [[unary-plus-operator]]
diff --git a/docs/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc b/docs/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc
index 4aaa2d4a0fc..eb1a70ae00b 100644
--- a/docs/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc
+++ b/docs/query-dsl/term-level/terms/terms-lookup-query-usage.asciidoc
@@ -26,7 +26,7 @@ q
 .Field(p => p.Description)
 .TermsLookup(e => e
     .Path(p => p.LastName)
-    .Id(12)
+    .Id("12")
     .Routing("myroutingvalue")
 )
 )
@@ -43,7 +43,7 @@ new TermsQuery
 Field = "description",
 TermsLookup = new FieldLookup
 {
-    Id = 12,
+    Id = "12",
     Index = Index(),
     Path = Field(p => p.LastName),
     Routing = "myroutingvalue"
@@ -59,7 +59,7 @@ new TermsQuery
 "_name": "named_query",
 "boost": 1.1,
 "description": {
-    "id": 12,
+    "id": "12",
     "index": "devs",
     "path": "lastName",
     "routing": "myroutingvalue"
diff --git a/docs/search/returned-fields.asciidoc b/docs/search/returned-fields.asciidoc
index 14a509734e4..c11784197c0 100644
--- a/docs/search/returned-fields.asciidoc
+++ b/docs/search/returned-fields.asciidoc
@@ -114,9 +114,7 @@ var searchResponse = _client.Search(s => s
 );
 ----
 <1> **Include** the following fields
-
 <2> **Exclude** the following fields
-
 <3> Fields can be included or excluded through wildcard patterns
 
 With source filtering specified on the request, `.Documents` will
diff --git a/docs/search/writing-queries.asciidoc b/docs/search/writing-queries.asciidoc
index 8cd0ffd43ab..69589caadbe 100644
--- a/docs/search/writing-queries.asciidoc
+++ b/docs/search/writing-queries.asciidoc
@@ -277,9 +277,7 @@ var searchResponse = _client.Search(s => s
 );
 ----
 <1> match documents where lead developer first name contains Russ
-
 <2> ...and where the lead developer last name contains Cam
-
 <3> ...and where the project started in 2017
 
 which yields the following query JSON
@@ -355,7 +353,6 @@ searchResponse = _client.Search(s => s
 );
 ----
 <1> combine queries using the binary `&&` operator
-
 <2> wrap a query in a `bool` query filter clause using the unary `+` operator and combine using the binary `&&` operator
 
 Take a look at the dedicated section on <> for more detail
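A minimal sketch of the two operators referenced in the final callouts — `&&` to combine queries and unary `+` to push one into the `bool` query's non-scoring filter clause — assuming the `Project` POCO used throughout these docs:

[source,csharp]
----
QueryContainer query =
    new MatchQuery { Field = "leadDeveloper.firstName", Query = "Russ" }
    && +new DateRangeQuery // unary + wraps this in a bool filter clause
    {
        Field = "startedOn",
        GreaterThanOrEqualTo = "2017-01-01",
        LessThan = "2018-01-01"
    };

var searchResponse = client.Search<Project>(s => s.Query(_ => query));
----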