diff --git a/docs/reference/connector/docs/connectors-salesforce.asciidoc b/docs/reference/connector/docs/connectors-salesforce.asciidoc index c640751de92c0..f5c5512ad5cc4 100644 --- a/docs/reference/connector/docs/connectors-salesforce.asciidoc +++ b/docs/reference/connector/docs/connectors-salesforce.asciidoc @@ -200,7 +200,7 @@ Once the permissions are set, assign the Profiles, Permission Set or Permission Follow these steps in Salesforce: 1. Navigate to `Administration` under the `Users` section. -2. Select `Users` and choose the user to set the permissions to. +2. Select `Users` and choose the user to set the permissions to. 3. Set the `Profile`, `Permission Set` or `Permission Set Groups` created in the earlier steps. [discrete#es-connectors-salesforce-sync-rules] @@ -249,7 +249,7 @@ Allowed values are *SOQL* and *SOSL*. [ { "query": "FIND {Salesforce} IN ALL FIELDS", - "language": "SOSL" + "language": "SOSL" } ] ---- @@ -381,7 +381,13 @@ See <> for more specifics o [discrete#es-connectors-salesforce-known-issues] ===== Known issues -There are currently no known issues for this connector. +* *DLS feature is "type-level" not "document-level"* ++ +Salesforce DLS, added in 8.13.0, does not accommodate specific access controls to specific Salesforce Objects. +Instead, if a given user/group can have access to _any_ Objects of a given type (`Case`, `Lead`, `Opportunity`, etc.), that user/group will appear in the `\_allow_access_control` list for _all_ of the Objects of that type. +See https://github.com/elastic/connectors/issues/3028 for more details. ++ + Refer to <> for a list of known issues for all connectors. [discrete#es-connectors-salesforce-security] @@ -396,7 +402,7 @@ This connector is built with the {connectors-python}[Elastic connector framework View the {connectors-python}/connectors/sources/salesforce.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== @@ -598,7 +604,7 @@ Once the permissions are set, assign the Profiles, Permission Set or Permission Follow these steps in Salesforce: 1. Navigate to `Administration` under the `Users` section. -2. Select `Users` and choose the user to set the permissions to. +2. Select `Users` and choose the user to set the permissions to. 3. Set the `Profile`, `Permission Set` or `Permission Set Groups` created in the earlier steps. [discrete#es-connectors-salesforce-client-sync-rules] @@ -648,7 +654,7 @@ Allowed values are *SOQL* and *SOSL*. [ { "query": "FIND {Salesforce} IN ALL FIELDS", - "language": "SOSL" + "language": "SOSL" } ] ---- @@ -781,7 +787,13 @@ See <> for more specifics o [discrete#es-connectors-salesforce-client-known-issues] ===== Known issues -There are currently no known issues for this connector. +* *DLS feature is "type-level" not "document-level"* ++ +Salesforce DLS, added in 8.13.0, does not accommodate specific access controls to specific Salesforce Objects. +Instead, if a given user/group can have access to _any_ Objects of a given type (`Case`, `Lead`, `Opportunity`, etc.), that user/group will appear in the `\_allow_access_control` list for _all_ of the Objects of that type. +See https://github.com/elastic/connectors/issues/3028 for more details. ++ + Refer to <> for a list of known issues for all connectors.
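+
+For illustration, the following sketch shows how this type-level behavior can surface in a synced document. The document is simplified and the identity values are hypothetical:
+
+[source,js]
+----
+{
+  "object_type": "Case",
+  "_allow_access_control": [
+    "jdoe@example.com", <1>
+    "asmith@example.com"
+  ]
+}
+----
+<1> Hypothetical identities. Each user is listed because they can access _some_ `Case` Objects in Salesforce, even if they cannot access this particular record.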
[discrete#es-connectors-salesforce-client-security] @@ -797,5 +809,5 @@ This connector is built with the {connectors-python}[Elastic connector framework View the {connectors-python}/connectors/sources/salesforce.py[source code for this connector^] (branch _{connectors-branch}_, compatible with Elastic _{minor-version}_). -// Closing the collapsible section +// Closing the collapsible section =============== diff --git a/docs/reference/data-streams/logs.asciidoc b/docs/reference/data-streams/logs.asciidoc index 6bb98684544a3..3af5e09889a89 100644 --- a/docs/reference/data-streams/logs.asciidoc +++ b/docs/reference/data-streams/logs.asciidoc @@ -1,18 +1,20 @@ [[logs-data-stream]] == Logs data stream -preview::[Logs data streams and the logsdb index mode are in tech preview and may be changed or removed in the future. Don't use logs data streams or logsdb index mode in production.] +IMPORTANT: The {es} `logsdb` index mode is generally available in Elastic Cloud Hosted +and self-managed Elasticsearch as of version 8.17, and is enabled by default for +logs in https://www.elastic.co/elasticsearch/serverless[{serverless-full}]. A logs data stream is a data stream type that stores log data more efficiently. In benchmarks, log data stored in a logs data stream used ~2.5 times less disk space than a regular data -stream. The exact impact will vary depending on your data set. +stream. The exact impact varies by data set. [discrete] [[how-to-use-logsds]] === Create a logs data stream -To create a logs data stream, set your index template `index.mode` to `logsdb`: +To create a logs data stream, set your <> `index.mode` to `logsdb`: [source,console] ---- @@ -31,10 +33,12 @@ PUT _index_template/my-index-template // TEST <1> The index mode setting. -<2> The index template priority. By default, Elasticsearch ships with an index template with a `logs-*-*` pattern with a priority of 100. You need to define a priority higher than 100 to ensure that this index template gets selected over the default index template for the `logs-*-*` pattern. See the <> for more information. +<2> The index template priority. By default, Elasticsearch ships with a `logs-*-*` index template with a priority of 100. To make sure your index template takes priority over the default `logs-*-*` template, set its `priority` to a number higher than 100. For more information, see <>. After the index template is created, new indices that use the template will be configured as a logs data stream. You can start indexing data and <>. +You can also set the index mode and adjust other template settings in <>. + //// [source,console] ---- @@ -46,154 +50,159 @@ DELETE _index_template/my-index-template [[logsdb-default-settings]] [discrete] -[[logsdb-synthtic-source]] +[[logsdb-synthetic-source]] === Synthetic source -By default, `logsdb` mode uses <>, which omits storing the original `_source` -field and synthesizes it from doc values or stored fields upon document retrieval. Synthetic source comes with a few -restrictions which you can read more about in the <> section dedicated to it. +If you have the required https://www.elastic.co/subscriptions[subscription], `logsdb` index mode uses <>, which omits storing the original `_source` +field. Instead, the document source is synthesized from doc values or stored fields upon document retrieval. -NOTE: When dealing with multi-value fields, the `index.mapping.synthetic_source_keep` setting controls how field values -are preserved for <> reconstruction. 
In `logsdb`, the default value is `arrays`, -which retains both duplicate values and the order of entries but not necessarily the exact structure when it comes to -array elements or objects. Preserving duplicates and ordering could be critical for some log fields. This could be the -case, for instance, for DNS A records, HTTP headers, or log entries that represent sequential or repeated events. +If you don't have the required https://www.elastic.co/subscriptions[subscription], `logsdb` mode uses the original `_source` field. -For more details on this setting and ways to refine or bypass it, check out <>. +Before using synthetic source, make sure to review the <>. + +When working with multi-value fields, the `index.mapping.synthetic_source_keep` setting controls how field values +are preserved for <> reconstruction. In `logsdb`, the default value is `arrays`, +which retains both duplicate values and the order of entries. However, the exact structure of +array elements and objects is not necessarily retained. Preserving duplicates and ordering can be critical for some +log fields, such as DNS A records, HTTP headers, and log entries that represent sequential or repeated events. [discrete] [[logsdb-sort-settings]] === Index sort settings -The following settings are applied by default when using the `logsdb` mode for index sorting: +In `logsdb` index mode, the following sort settings are applied by default: -* `index.sort.field`: `["host.name", "@timestamp"]` - In `logsdb` mode, indices are sorted by `host.name` and `@timestamp` fields by default. For data streams, the - `@timestamp` field is automatically injected if it is not present. +`index.sort.field`: `["host.name", "@timestamp"]`:: +Indices are sorted by `host.name` and `@timestamp` by default. The `@timestamp` field is automatically injected if it is not present. -* `index.sort.order`: `["desc", "desc"]` - The default sort order for both fields is descending (`desc`), prioritizing the latest data. +`index.sort.order`: `["desc", "desc"]`:: +Both `host.name` and `@timestamp` are sorted in descending (`desc`) order, prioritizing the latest data. -* `index.sort.mode`: `["min", "min"]` - The default sort mode is `min`, ensuring that indices are sorted by the minimum value of multi-value fields. +`index.sort.mode`: `["min", "min"]`:: +The `min` mode sorts indices by the minimum value of multi-value fields. -* `index.sort.missing`: `["_first", "_first"]` - Missing values are sorted to appear first (`_first`) in `logsdb` index mode. +`index.sort.missing`: `["_first", "_first"]`:: +Missing values are sorted to appear `_first`. -`logsdb` index mode allows users to override the default sort settings. For instance, users can specify their own fields -and order for sorting by modifying the `index.sort.field` and `index.sort.order`. +You can override these default sort settings. For example, to sort on different fields +and change the order, manually configure `index.sort.field` and `index.sort.order`. For more details, see +<>. -When using default sort settings, the `host.name` field is automatically injected into the mappings of the -index as a `keyword` field to ensure that sorting can be applied. This guarantees that logs are efficiently sorted and -retrieved based on the `host.name` and `@timestamp` fields. +When using the default sort settings, the `host.name` field is automatically injected into the index mappings as a `keyword` field to ensure that sorting can be applied. 
This guarantees that logs are efficiently sorted and retrieved based on the `host.name` and `@timestamp` fields. -NOTE: If `subobjects` is set to `true` (which is the default), the `host.name` field will be mapped as an object field -named `host`, containing a `name` child field of type `keyword`. On the other hand, if `subobjects` is set to `false`, -a single `host.name` field will be mapped as a `keyword` field. +NOTE: If `subobjects` is set to `true` (default), the `host.name` field is mapped as an object field +named `host` with a `name` child field of type `keyword`. If `subobjects` is set to `false`, +a single `host.name` field is mapped as a `keyword` field. -Once an index is created, the sort settings are immutable and cannot be modified. To apply different sort settings, -a new index must be created with the desired configuration. For data streams, this can be achieved by means of an index -rollover after updating relevant (component) templates. +To apply different sort settings to an existing data stream, update the data stream's component templates, and then +perform or wait for a <>. -If the default sort settings are not suitable for your use case, consider modifying them. Keep in mind that sort -settings can influence indexing throughput, query latency, and may affect compression efficiency due to the way data -is organized after sorting. For more details, refer to our documentation on -<>. - -NOTE: For <>, the `@timestamp` field is automatically injected if not already present. -However, if custom sort settings are applied, the `@timestamp` field is injected into the mappings, but it is not +NOTE: In `logsdb` mode, the `@timestamp` field is automatically injected if it's not already present. If you apply custom sort settings, the `@timestamp` field is injected into the mappings but is not automatically added to the list of sort fields. [discrete] -[[logsdb-specialized-codecs]] -=== Specialized codecs +[[logsdb-host-name]] +==== Existing data streams -`logsdb` index mode uses the `best_compression` <> by default, which applies {wikipedia}/Zstd[ZSTD] -compression to stored fields. Users are allowed to override it and switch to the `default` codec for faster compression -at the expense of slightly larger storage footprint. +If you're enabling `logsdb` index mode on a data stream that already exists, make sure to check mappings and sorting. The `logsdb` mode automatically maps `host.name` as a keyword if it's included in the sort settings. If a `host.name` field already exists but has a different type, mapping errors might occur, preventing `logsdb` mode from being fully applied. -`logsdb` index mode also adopts specialized codecs for numeric doc values that are crafted to optimize storage usage. -Users can rely on these specialized codecs being applied by default when using `logsdb` index mode. +To avoid mapping conflicts, consider these options: -Doc values encoding for numeric fields in `logsdb` follows a static sequence of codecs, applying each one in the -following order: delta encoding, offset encoding, Greatest Common Divisor GCD encoding, and finally Frame Of Reference -(FOR) encoding. The decision to apply each encoding is based on heuristics determined by the data distribution. -For example, before applying delta encoding, the algorithm checks if the data is monotonically non-decreasing or -non-increasing. If the data fits this pattern, delta encoding is applied; otherwise, the next encoding is considered.
+* **Adjust mappings:** Check your existing mappings to ensure that `host.name` is mapped as a keyword. -The encoding is specific to each Lucene segment and is also re-applied at segment merging time. The merged Lucene segment -may use a different encoding compared to the original Lucene segments, based on the characteristics of the merged data. +* **Change sorting:** If needed, you can remove `host.name` from the sort settings and use a different set of fields. Sorting by `@timestamp` can be a good fallback. + +* **Switch to a different <>**: If resolving `host.name` mapping conflicts is not feasible, you can choose not to use `logsdb` mode. + +IMPORTANT: On existing data streams, `logsdb` mode is applied on <> (automatic or manual). + +[discrete] +[[logsdb-specialized-codecs]] +=== Specialized codecs -The following methods are applied sequentially: +By default, `logsdb` index mode uses the `best_compression` <>, which applies {wikipedia}/Zstd[ZSTD] +compression to stored fields. You can switch to the `default` codec for faster compression with a slightly larger storage footprint. + +The `logsdb` index mode also automatically applies specialized codecs for numeric doc values, in order to optimize storage usage. Numeric fields are +encoded using the following sequence of codecs: * **Delta encoding**: - a compression method that stores the difference between consecutive values instead of the actual values. + Stores the difference between consecutive values instead of the actual values. * **Offset encoding**: - a compression method that stores the difference from a base value rather than between consecutive values. + Stores the difference from a base value rather than between consecutive values. * **Greatest Common Divisor (GCD) encoding**: - a compression method that finds the greatest common divisor of a set of values and stores the differences - as multiples of the GCD. + Finds the greatest common divisor of a set of values and stores the differences as multiples of the GCD. * **Frame Of Reference (FOR) encoding**: - a compression method that determines the smallest number of bits required to encode a block of values and uses + Determines the smallest number of bits required to encode a block of values and uses bit-packing to fit such values into larger 64-bit blocks. +Each encoding is evaluated according to heuristics determined by the data distribution. +For example, the algorithm checks whether the data is monotonically non-decreasing or +non-increasing. If so, delta encoding is applied; otherwise, the process +continues with the next encoding method (offset). + +Encoding is specific to each Lucene segment and is reapplied when segments are merged. The merged Lucene segment +might use a different encoding than the original segments, depending on the characteristics of the merged data. + For keyword fields, **Run Length Encoding (RLE)** is applied to the ordinals, which represent positions in the Lucene segment-level keyword dictionary. This compression is used when multiple consecutive documents share the same keyword. [discrete] [[logsdb-ignored-settings]] -=== `ignore_malformed`, `ignore_above`, `ignore_dynamic_beyond_limit` +=== `ignore` settings + +The `logsdb` index mode uses the following `ignore` settings. You can override these settings as needed. + +[discrete] +[[logsdb-ignore-malformed]] +==== `ignore_malformed` -By default, `logsdb` index mode sets `ignore_malformed` to `true`. 
This setting allows documents with malformed fields -to be indexed without causing indexing failures, ensuring that log data ingestion continues smoothly even when some -fields contain invalid or improperly formatted data. +By default, `logsdb` index mode sets `ignore_malformed` to `true`. With this setting, documents with malformed fields +can be indexed without causing ingestion failures. -Users can override this setting by setting `index.mapping.ignore_malformed` to `false`. However, this is not recommended -as it might result in documents with malformed fields being rejected and not indexed at all. +[discrete] +[[logs-db-ignore-above]] +==== `ignore_above` In `logsdb` index mode, the `index.mapping.ignore_above` setting is applied by default at the index level to ensure -efficient storage and indexing of large keyword fields.The index-level default for `ignore_above` is set to 8191 -**characters**. If using UTF-8 encoding, this results in a limit of 32764 bytes, depending on character encoding. -The mapping-level `ignore_above` setting still takes precedence. If a specific field has an `ignore_above` value -defined in its mapping, that value will override the index-level `index.mapping.ignore_above` value. This default -behavior helps to optimize indexing performance by preventing excessively large string values from being indexed, while -still allowing users to customize the limit, overriding it at the mapping level or changing the index level default -setting. +efficient storage and indexing of large keyword fields. The index-level default for `ignore_above` is 8191 +_characters._ Using UTF-8 encoding, this results in a limit of 32764 bytes, depending on character encoding. + +The mapping-level `ignore_above` setting takes precedence. If a specific field has an `ignore_above` value +defined in its mapping, that value overrides the index-level `index.mapping.ignore_above` value. This default +behavior helps to optimize indexing performance by preventing excessively large string values from being indexed. + +If you need to customize the limit, you can override it at the mapping level or change the index-level default. + +[discrete] +[[logs-db-ignore-limit]] +==== `ignore_dynamic_beyond_limit` In `logsdb` index mode, the setting `index.mapping.total_fields.ignore_dynamic_beyond_limit` is set to `true` by -default. This allows dynamically mapped fields to be added on top of statically defined fields without causing document -rejection, even after the total number of fields exceeds the limit defined by `index.mapping.total_fields.limit`. The -`index.mapping.total_fields.limit` setting specifies the maximum number of fields an index can have (static, dynamic -and runtime). When the limit is reached, new dynamically mapped fields will be ignored instead of failing the document -indexing, ensuring continued log ingestion without errors. +default. This setting allows dynamically mapped fields to be added on top of statically defined fields, even when the total number of fields exceeds the `index.mapping.total_fields.limit`. Instead of triggering an indexing failure, additional dynamically mapped fields are ignored so that ingestion can continue. -NOTE: When automatically injected, `host.name` and `@timestamp` contribute to the limit of mapped fields. When -`host.name` is mapped with `subobjects: true` it consists of two fields. When `host.name` is mapped with -`subobjects: false` it only consists of one field.
+NOTE: When automatically injected, `host.name` and `@timestamp` count toward the limit of mapped fields. If `host.name` is mapped with `subobjects: true`, it has two fields. When mapped with `subobjects: false`, `host.name` has only one field. [discrete] [[logsdb-nodocvalue-fields]] -=== Fields without doc values +=== Fields without `doc_values` -When `logsdb` index mode uses synthetic `_source`, and `doc_values` are disabled for a field in the mapping, -Elasticsearch may set the `store` setting to `true` for that field as a last resort option to ensure that the field's -data is still available for reconstructing the document’s source when retrieving it via +When the `logsdb` index mode uses synthetic `_source` and `doc_values` are disabled for a field in the mapping, +{es} might set the `store` setting to `true` for that field. This ensures that the field's +data remains accessible for reconstructing the document's source when using <>. -For example, this happens with text fields when `store` is `false` and there is no suitable multi-field available to -reconstruct the original value in <>. - -This automatic adjustment allows synthetic source to work correctly, even when doc values are not enabled for certain -fields. +For example, this adjustment occurs with text fields when `store` is `false` and no suitable multi-field is available for +reconstructing the original value. [discrete] [[logsdb-settings-summary]] -=== LogsDB settings summary +=== Settings reference -The following is a summary of key settings that apply when using `logsdb` index mode in Elasticsearch: +The `logsdb` index mode uses the following settings: * **`index.mode`**: `"logsdb"` diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index d0d6d4a455c63..1e1d56e5b4d93 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -17,7 +17,7 @@ metrics data. Only use a TSDS if you typically add metrics data to {es} in near real-time and `@timestamp` order. A TSDS is only intended for metrics data. For other timestamped data, such as -logs or traces, use a regular data stream. +logs or traces, use a <> or regular data stream. [discrete] [[differences-from-regular-data-stream]] diff --git a/docs/reference/images/index-mgmt/management-data-stream-fields.png b/docs/reference/images/index-mgmt/management-data-stream-fields.png new file mode 100644 index 0000000000000..605d49b80ab1f Binary files /dev/null and b/docs/reference/images/index-mgmt/management-data-stream-fields.png differ diff --git a/docs/reference/images/index-mgmt/management-data-stream.png b/docs/reference/images/index-mgmt/management-data-stream.png deleted file mode 100644 index 01534fdec2a23..0000000000000 Binary files a/docs/reference/images/index-mgmt/management-data-stream.png and /dev/null differ diff --git a/docs/reference/images/index-mgmt/management-index-templates.png b/docs/reference/images/index-mgmt/management-index-templates.png index 9188aa85e68cd..1ed004e85e71d 100644 Binary files a/docs/reference/images/index-mgmt/management-index-templates.png and b/docs/reference/images/index-mgmt/management-index-templates.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 1c8f1db216b75..d9b8f8802a04b 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -113,10 +113,9 @@ Index mode supports the following values: `standard`::: Standard indexing with default settings. 
-`time_series`::: Index mode optimized for storage of metrics documented in <>. +`time_series`::: _(data streams only)_ Index mode optimized for storage of metrics. For more information, see <>. -`logsdb`::: Index mode optimized for storage of logs. It applies default sort settings on the `hostname` and `timestamp` fields and uses <>. <> on different fields is still allowed. -preview:[] +`logsdb`::: _(data streams only)_ Index mode optimized for <>. [[routing-partition-size]] `index.routing_partition_size`:: diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc index 7a78f9452b85e..73643dbfd4b3b 100644 --- a/docs/reference/indices/index-mgmt.asciidoc +++ b/docs/reference/indices/index-mgmt.asciidoc @@ -67,7 +67,7 @@ This value is the time period for which your data is guaranteed to be stored. Da Elasticsearch at a later time. [role="screenshot"] -image::images/index-mgmt/management-data-stream.png[Data stream details] +image::images/index-mgmt/management-data-stream-fields.png[Data stream details] * To view more information about a data stream, such as its generation or its current index lifecycle policy, click the stream's name. From this view, you can navigate to *Discover* to diff --git a/docs/reference/indices/put-index-template.asciidoc b/docs/reference/indices/put-index-template.asciidoc index 36fc66ecb90b8..9a31037546796 100644 --- a/docs/reference/indices/put-index-template.asciidoc +++ b/docs/reference/indices/put-index-template.asciidoc @@ -115,10 +115,10 @@ See <>. `index_mode`:: (Optional, string) Type of data stream to create. Valid values are `null` -(regular data stream) and `time_series` (<>). +(standard data stream), `time_series` (<>), and `logsdb` +(<>). + -If `time_series`, each backing index has an `index.mode` index setting of -`time_series`. +The template's `index_mode` sets the `index.mode` of the backing index. ===== `index_patterns`:: diff --git a/docs/reference/mapping/fields/synthetic-source.asciidoc b/docs/reference/mapping/fields/synthetic-source.asciidoc index f8666e2993d6a..ddbefb73f4522 100644 --- a/docs/reference/mapping/fields/synthetic-source.asciidoc +++ b/docs/reference/mapping/fields/synthetic-source.asciidoc @@ -1,17 +1,10 @@ [[synthetic-source]] ==== Synthetic `_source` -IMPORTANT: Synthetic `_source` is Generally Available only for TSDB indices -(indices that have `index.mode` set to `time_series`). For other indices, -synthetic `_source` is in technical preview. Features in technical preview may -be changed or removed in a future release. Elastic will work to fix -any issues, but features in technical preview are not subject to the support SLA -of official GA features. - Though very handy to have around, the source field takes up a significant amount of space on disk. Instead of storing source documents on disk exactly as you send them, Elasticsearch can reconstruct source content on the fly upon retrieval. -Enable this by using the value `synthetic` for the index setting `index.mapping.source.mode`: +To enable this https://www.elastic.co/subscriptions[subscription] feature, use the value `synthetic` for the index setting `index.mapping.source.mode`: [source,console,id=enable-synthetic-source-example] ---- @@ -30,7 +23,7 @@ PUT idx ---- // TESTSETUP -While this on the fly reconstruction is *generally* slower than saving the source +While this on-the-fly reconstruction is _generally_ slower than saving the source documents verbatim and loading them at query time, it saves a lot of storage space.
Additional latency can be avoided by not loading `_source` field in queries when it is not needed. diff --git a/muted-tests.yml b/muted-tests.yml index c07363657b3ec..613d3a3655ccf 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -171,9 +171,6 @@ tests: - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT method: test {p0=synonyms/90_synonyms_reloading_for_synset/Reload analyzers for specific synonym set} issue: https://github.com/elastic/elasticsearch/issues/116777 -- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests - method: testStopWorksInMiddleOfProcessing - issue: https://github.com/elastic/elasticsearch/issues/117591 - class: "org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT" method: "test {scoring.*}" issue: https://github.com/elastic/elasticsearch/issues/117641 @@ -317,6 +314,9 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/118220 - class: org.elasticsearch.xpack.esql.action.EsqlActionBreakerIT issue: https://github.com/elastic/elasticsearch/issues/118238 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testInvalidJSON + issue: https://github.com/elastic/elasticsearch/issues/116521 # Examples: # diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 5f907572641a6..e36604f9a58c8 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -37,8 +37,6 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.health.HealthStatus.GREEN; import static org.elasticsearch.health.HealthStatus.YELLOW; @@ -122,6 +120,18 @@ public void handleSnapshotRestore(ClusterState clusterState, Metadata.Builder md } } + @Override + protected void doStart() { + healthIndicatorService.startOccurred(); + super.doStart(); + } + + @Override + protected void doStop() { + super.doStop(); + healthIndicatorService.stopOccurred(); + } + /** * If the file settings metadata version is set to zero, then we have restored from * a snapshot and must reprocess the file. 
@@ -211,6 +221,7 @@ protected void processInitialFileMissing() throws ExecutionException, Interrupte public static class FileSettingsHealthIndicatorService implements HealthIndicatorService { static final String NAME = "file_settings"; + static final String INACTIVE_SYMPTOM = "File-based settings are inactive"; static final String NO_CHANGES_SYMPTOM = "No file-based setting changes have occurred"; static final String SUCCESS_SYMPTOM = "The most recent file-based settings were applied successfully"; static final String FAILURE_SYMPTOM = "The most recent file-based settings encountered an error"; @@ -225,21 +236,33 @@ public static class FileSettingsHealthIndicatorService implements HealthIndicato ) ); - private final AtomicLong changeCount = new AtomicLong(0); - private final AtomicLong failureStreak = new AtomicLong(0); - private final AtomicReference mostRecentFailure = new AtomicReference<>(); + private boolean isActive = false; + private long changeCount = 0; + private long failureStreak = 0; + private String mostRecentFailure = null; - public void changeOccurred() { - changeCount.incrementAndGet(); + public synchronized void startOccurred() { + isActive = true; + failureStreak = 0; } - public void successOccurred() { - failureStreak.set(0); + public synchronized void stopOccurred() { + isActive = false; + mostRecentFailure = null; } - public void failureOccurred(String description) { - failureStreak.incrementAndGet(); - mostRecentFailure.set(description); + public synchronized void changeOccurred() { + ++changeCount; + } + + public synchronized void successOccurred() { + failureStreak = 0; + mostRecentFailure = null; + } + + public synchronized void failureOccurred(String description) { + ++failureStreak; + mostRecentFailure = description; } @Override @@ -248,18 +271,20 @@ public String name() { } @Override - public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { - if (0 == changeCount.get()) { + public synchronized HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { + if (isActive == false) { + return createIndicator(GREEN, INACTIVE_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()); + } + if (0 == changeCount) { return createIndicator(GREEN, NO_CHANGES_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()); } - long numFailures = failureStreak.get(); - if (0 == numFailures) { + if (0 == failureStreak) { return createIndicator(GREEN, SUCCESS_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()); } else { return createIndicator( YELLOW, FAILURE_SYMPTOM, - new SimpleHealthIndicatorDetails(Map.of("failure_streak", numFailures, "most_recent_failure", mostRecentFailure.get())), + new SimpleHealthIndicatorDetails(Map.of("failure_streak", failureStreak, "most_recent_failure", mostRecentFailure)), STALE_SETTINGS_IMPACT, List.of() ); diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java index 03d1adff42c4e..20ea43910e68d 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsHealthIndicatorServiceTests.java @@ -22,6 +22,7 @@ import static org.elasticsearch.health.HealthStatus.GREEN; import static org.elasticsearch.health.HealthStatus.YELLOW; import 
static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.FAILURE_SYMPTOM; +import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.INACTIVE_SYMPTOM; import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.NO_CHANGES_SYMPTOM; import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.STALE_SETTINGS_IMPACT; import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.SUCCESS_SYMPTOM; @@ -39,14 +40,27 @@ public void initialize() { healthIndicatorService = new FileSettingsHealthIndicatorService(); } - public void testInitiallyGreen() { + public void testInitiallyGreen() {} + + public void testStartAndStop() { + assertEquals( + new HealthIndicatorResult("file_settings", GREEN, INACTIVE_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), + healthIndicatorService.calculate(false, null) + ); + healthIndicatorService.startOccurred(); assertEquals( new HealthIndicatorResult("file_settings", GREEN, NO_CHANGES_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), healthIndicatorService.calculate(false, null) ); + healthIndicatorService.stopOccurred(); + assertEquals( + new HealthIndicatorResult("file_settings", GREEN, INACTIVE_SYMPTOM, HealthIndicatorDetails.EMPTY, List.of(), List.of()), + healthIndicatorService.calculate(false, null) + ); } public void testGreenYellowYellowGreen() { + healthIndicatorService.startOccurred(); healthIndicatorService.changeOccurred(); // This is a strange case: a change occurred, but neither success nor failure have been reported yet. // While the change is still in progress, we don't change the status. 
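For context, the new inactive symptom is what the health API reports once the service is stopped. A minimal sketch of how this surfaces, assuming a node where file-based settings are inactive (the indicator name and symptom string come from the code above; the response is abridged to the relevant fields):

[source,console]
----
GET _health_report/file_settings
----

[source,console-result]
----
{
  "indicators": {
    "file_settings": {
      "status": "green",
      "symptom": "File-based settings are inactive" <1>
    }
  }
}
----
<1> Matches `INACTIVE_SYMPTOM` in `FileSettingsHealthIndicatorService`; other response fields are omitted here.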
diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 08d83e48b7152..c19cf7c31bc68 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NodeConnectionsService; -import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -69,6 +68,8 @@ import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; +import static org.elasticsearch.health.HealthStatus.GREEN; +import static org.elasticsearch.health.HealthStatus.YELLOW; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.hasEntry; @@ -82,8 +83,6 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyNoInteractions; -import static org.mockito.Mockito.verifyNoMoreInteractions; public class FileSettingsServiceTests extends ESTestCase { private static final Logger logger = LogManager.getLogger(FileSettingsServiceTests.class); @@ -138,7 +137,7 @@ public void setUp() throws Exception { List.of(new ReservedClusterSettingsAction(clusterSettings)) ) ); - healthIndicatorService = mock(FileSettingsHealthIndicatorService.class); + healthIndicatorService = spy(new FileSettingsHealthIndicatorService()); fileSettingsService = spy(new FileSettingsService(clusterService, controller, env, healthIndicatorService)); } @@ -170,7 +169,8 @@ public void testStartStop() { assertTrue(fileSettingsService.watching()); fileSettingsService.stop(); assertFalse(fileSettingsService.watching()); - verifyNoInteractions(healthIndicatorService); + verify(healthIndicatorService, times(1)).startOccurred(); + verify(healthIndicatorService, times(1)).stopOccurred(); } public void testOperatorDirName() { @@ -218,9 +218,9 @@ public void testInitialFileError() throws Exception { // assert we never notified any listeners of successful application of file based settings assertFalse(settingsChanged.get()); + assertEquals(YELLOW, healthIndicatorService.calculate(false, null).status()); verify(healthIndicatorService, times(1)).changeOccurred(); verify(healthIndicatorService, times(1)).failureOccurred(argThat(s -> s.startsWith(IllegalStateException.class.getName()))); - verifyNoMoreInteractions(healthIndicatorService); } @SuppressWarnings("unchecked") @@ -246,9 +246,9 @@ public void testInitialFileWorks() throws Exception { verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); + assertEquals(GREEN, healthIndicatorService.calculate(false, null).status()); verify(healthIndicatorService, times(1)).changeOccurred(); verify(healthIndicatorService, times(1)).successOccurred(); - verifyNoMoreInteractions(healthIndicatorService); } @SuppressWarnings("unchecked") @@ -285,9 
+285,9 @@ public void testProcessFileChanges() throws Exception { verify(fileSettingsService, times(1)).processFileChanges(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any()); + assertEquals(GREEN, healthIndicatorService.calculate(false, null).status()); verify(healthIndicatorService, times(2)).changeOccurred(); verify(healthIndicatorService, times(2)).successOccurred(); - verifyNoMoreInteractions(healthIndicatorService); } public void testInvalidJSON() throws Exception { @@ -323,6 +323,7 @@ public void testInvalidJSON() throws Exception { // referring to fileSettingsService.start(). Rather, it is referring to the initialization // of the watcher thread itself, which occurs asynchronously when clusterChanged is first called. + assertEquals(YELLOW, healthIndicatorService.calculate(false, null).status()); verify(healthIndicatorService).failureOccurred(contains(XContentParseException.class.getName())); } @@ -388,14 +389,13 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { fileSettingsService.stop(); assertFalse(fileSettingsService.watching()); fileSettingsService.close(); + + // When the service is stopped, the health indicator should be green + assertEquals(GREEN, healthIndicatorService.calculate(false, null).status()); + verify(healthIndicatorService).stopOccurred(); + // let the deadlocked thread end, so we can cleanly exit the test deadThreadLatch.countDown(); - - verify(healthIndicatorService, times(1)).changeOccurred(); - verify(healthIndicatorService, times(1)).failureOccurred( - argThat(s -> s.startsWith(FailedToCommitClusterStateException.class.getName())) - ); - verifyNoMoreInteractions(healthIndicatorService); } public void testHandleSnapshotRestoreClearsMetadata() throws Exception { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java index e3719d57ca25c..aaaaf9943a611 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Priority; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Strings; import static org.elasticsearch.xpack.core.ilm.LifecycleOperationMetadata.currentILMMode; import static org.elasticsearch.xpack.core.ilm.LifecycleOperationMetadata.currentSLMMode; @@ -143,7 +144,10 @@ private ClusterState updateSLMState(final ClusterState currentState) { @Override public void onFailure(Exception e) { - logger.error("unable to update lifecycle metadata with new ilm mode [" + ilmMode + "], slm mode [" + slmMode + "]", e); + logger.error( + () -> Strings.format("unable to update lifecycle metadata with new ilm mode [%s], slm mode [%s]", ilmMode, slmMode), + e + ); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java index 7963d04e0f666..70f75f1cfcdfa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyTests.java @@ -151,17 +151,7 @@ public static LifecyclePolicy 
randomTimeseriesLifecyclePolicy(@Nullable String l // Remove the frozen phase, we'll randomly re-add it later .filter(pn -> TimeseriesLifecycleType.FROZEN_PHASE.equals(pn) == false) .collect(Collectors.toList()); - Map phases = Maps.newMapWithExpectedSize(phaseNames.size()); - Function> validActions = getPhaseToValidActions(); - Function randomAction = getNameToActionFunction(); - // as what actions end up in the hot phase influence what actions are allowed in the subsequent phases we'll move the hot phase - // at the front of the phases to process (if it exists) - if (phaseNames.contains(TimeseriesLifecycleType.HOT_PHASE)) { - phaseNames.remove(TimeseriesLifecycleType.HOT_PHASE); - phaseNames.add(0, TimeseriesLifecycleType.HOT_PHASE); - } - boolean hotPhaseContainsSearchableSnap = false; - boolean coldPhaseContainsSearchableSnap = false; + // let's order the phases so we can reason about actions in a previous phase in order to generate a random *valid* policy List orderedPhases = new ArrayList<>(phaseNames.size()); for (String validPhase : TimeseriesLifecycleType.ORDERED_VALID_PHASES) { @@ -170,6 +160,12 @@ public static LifecyclePolicy randomTimeseriesLifecyclePolicy(@Nullable String l } } + Map phases = Maps.newMapWithExpectedSize(phaseNames.size()); + Function> validActions = getPhaseToValidActions(); + Function randomAction = getNameToActionFunction(); + boolean hotPhaseContainsSearchableSnap = false; + boolean coldPhaseContainsSearchableSnap = false; + TimeValue prev = null; for (String phase : orderedPhases) { TimeValue after = prev == null diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java index e849512aa8f73..0de234615f4c7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java @@ -22,7 +22,7 @@ public class MockAction implements LifecycleAction { public static final String NAME = "TEST_ACTION"; - private List steps; + private final List steps; private static final ObjectParser PARSER = new ObjectParser<>(NAME, MockAction::new); private final boolean safe; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index f830a2821d841..5b06ad93a9b07 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -91,7 +91,6 @@ import java.io.IOException; import java.time.Clock; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.function.LongSupplier; @@ -121,7 +120,7 @@ protected Clock getClock() { @Override public List> getSettings() { - return Arrays.asList( + return List.of( LifecycleSettings.LIFECYCLE_POLL_INTERVAL_SETTING, LifecycleSettings.LIFECYCLE_NAME_SETTING, LifecycleSettings.LIFECYCLE_INDEXING_COMPLETE_SETTING, @@ -203,7 +202,7 @@ public List getNamedXContent() { } private static List xContentEntries() { - return Arrays.asList( + return List.of( // Custom Metadata new NamedXContentRegistry.Entry( Metadata.Custom.class, @@ -259,52 +258,38 @@ public List getRestHandlers( Supplier nodesInCluster, Predicate clusterSupportsFeature ) { - List handlers = new ArrayList<>(); - - handlers.addAll( - Arrays.asList( - // add ILM 
rest handlers - new RestPutLifecycleAction(), - new RestGetLifecycleAction(), - new RestDeleteLifecycleAction(), - new RestExplainLifecycleAction(), - new RestRemoveIndexLifecyclePolicyAction(), - new RestMoveToStepAction(), - new RestRetryAction(), - new RestStopAction(), - new RestStartILMAction(), - new RestGetStatusAction(), - new RestMigrateToDataTiersAction() - ) + return List.of( + new RestPutLifecycleAction(), + new RestGetLifecycleAction(), + new RestDeleteLifecycleAction(), + new RestExplainLifecycleAction(), + new RestRemoveIndexLifecyclePolicyAction(), + new RestMoveToStepAction(), + new RestRetryAction(), + new RestStopAction(), + new RestStartILMAction(), + new RestGetStatusAction(), + new RestMigrateToDataTiersAction() ); - return handlers; } @Override public List> getActions() { - var ilmUsageAction = new ActionHandler<>(XPackUsageFeatureAction.INDEX_LIFECYCLE, IndexLifecycleUsageTransportAction.class); - var ilmInfoAction = new ActionHandler<>(XPackInfoFeatureAction.INDEX_LIFECYCLE, IndexLifecycleInfoTransportAction.class); - var migrateToDataTiersAction = new ActionHandler<>(MigrateToDataTiersAction.INSTANCE, TransportMigrateToDataTiersAction.class); - List> actions = new ArrayList<>(); - actions.add(ilmUsageAction); - actions.add(ilmInfoAction); - actions.add(migrateToDataTiersAction); - actions.addAll( - Arrays.asList( - // add ILM actions - new ActionHandler<>(ILMActions.PUT, TransportPutLifecycleAction.class), - new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class), - new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class), - new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class), - new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class), - new ActionHandler<>(ILMActions.MOVE_TO_STEP, TransportMoveToStepAction.class), - new ActionHandler<>(ILMActions.RETRY, TransportRetryAction.class), - new ActionHandler<>(ILMActions.START, TransportStartILMAction.class), - new ActionHandler<>(ILMActions.STOP, TransportStopILMAction.class), - new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) - ) + return List.of( + new ActionHandler<>(XPackUsageFeatureAction.INDEX_LIFECYCLE, IndexLifecycleUsageTransportAction.class), + new ActionHandler<>(XPackInfoFeatureAction.INDEX_LIFECYCLE, IndexLifecycleInfoTransportAction.class), + new ActionHandler<>(MigrateToDataTiersAction.INSTANCE, TransportMigrateToDataTiersAction.class), + new ActionHandler<>(ILMActions.PUT, TransportPutLifecycleAction.class), + new ActionHandler<>(GetLifecycleAction.INSTANCE, TransportGetLifecycleAction.class), + new ActionHandler<>(DeleteLifecycleAction.INSTANCE, TransportDeleteLifecycleAction.class), + new ActionHandler<>(ExplainLifecycleAction.INSTANCE, TransportExplainLifecycleAction.class), + new ActionHandler<>(RemoveIndexLifecyclePolicyAction.INSTANCE, TransportRemoveIndexLifecyclePolicyAction.class), + new ActionHandler<>(ILMActions.MOVE_TO_STEP, TransportMoveToStepAction.class), + new ActionHandler<>(ILMActions.RETRY, TransportRetryAction.class), + new ActionHandler<>(ILMActions.START, TransportStartILMAction.class), + new ActionHandler<>(ILMActions.STOP, TransportStopILMAction.class), + new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) ); - return actions; } List> reservedClusterStateHandlers() { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java index efa8e67fee3c8..85739dcd0dcfb 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunner.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -39,7 +40,6 @@ import java.util.Collections; import java.util.HashSet; -import java.util.Locale; import java.util.Objects; import java.util.Set; import java.util.function.LongSupplier; @@ -290,13 +290,7 @@ void onErrorMaybeRetryFailedStep(String policy, StepKey currentStep, IndexMetada // IndexLifecycleRunner#runPeriodicStep} run the policy will still be in the ERROR step, as we haven't been able // to move it back into the failed step, so we'll try again submitUnlessAlreadyQueued( - String.format( - Locale.ROOT, - "ilm-retry-failed-step {policy [%s], index [%s], failedStep [%s]}", - policy, - index, - failedStep.getKey() - ), + Strings.format("ilm-retry-failed-step {policy [%s], index [%s], failedStep [%s]}", policy, index, failedStep.getKey()), new MoveToRetryFailedStepUpdateTask(indexMetadata.getIndex(), policy, currentStep, failedStep) ); } else { @@ -444,7 +438,7 @@ void runPolicyAfterStateChange(String policy, IndexMetadata indexMetadata) { } else if (currentStep instanceof ClusterStateActionStep || currentStep instanceof ClusterStateWaitStep) { logger.debug("[{}] running policy with current-step [{}]", indexMetadata.getIndex().getName(), currentStep.getKey()); submitUnlessAlreadyQueued( - String.format(Locale.ROOT, "ilm-execute-cluster-state-steps [%s]", currentStep), + Strings.format("ilm-execute-cluster-state-steps [%s]", currentStep), new ExecuteStepsUpdateTask(policy, indexMetadata.getIndex(), currentStep, stepRegistry, this, nowSupplier) ); } else { @@ -459,8 +453,7 @@ void runPolicyAfterStateChange(String policy, IndexMetadata indexMetadata) { private void moveToStep(Index index, String policy, Step.StepKey currentStepKey, Step.StepKey newStepKey) { logger.debug("[{}] moving to step [{}] {} -> {}", index.getName(), policy, currentStepKey, newStepKey); submitUnlessAlreadyQueued( - String.format( - Locale.ROOT, + Strings.format( "ilm-move-to-step {policy [%s], index [%s], currentStep [%s], nextStep [%s]}", policy, index.getName(), @@ -486,13 +479,7 @@ private void moveToErrorStep(Index index, String policy, Step.StepKey currentSte e ); submitUnlessAlreadyQueued( - String.format( - Locale.ROOT, - "ilm-move-to-error-step {policy [%s], index [%s], currentStep [%s]}", - policy, - index.getName(), - currentStepKey - ), + Strings.format("ilm-move-to-error-step {policy [%s], index [%s], currentStep [%s]}", policy, index.getName(), currentStepKey), new MoveToErrorStepUpdateTask(index, policy, currentStepKey, e, nowSupplier, stepRegistry::getStep, clusterState -> { IndexMetadata indexMetadata = clusterState.metadata().index(index); registerFailedOperation(indexMetadata, e); @@ -506,13 +493,7 @@ private void moveToErrorStep(Index index, String policy, Step.StepKey currentSte */ private void setStepInfo(Index index, String policy, @Nullable Step.StepKey currentStepKey, ToXContentObject stepInfo) { submitUnlessAlreadyQueued( - 
String.format( - Locale.ROOT, - "ilm-set-step-info {policy [%s], index [%s], currentStep [%s]}", - policy, - index.getName(), - currentStepKey - ), + Strings.format("ilm-set-step-info {policy [%s], index [%s], currentStep [%s]}", policy, index.getName(), currentStepKey), new SetStepInfoUpdateTask(index, policy, currentStepKey, stepInfo) ); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index 9c978ffc25cba..e59bde7253051 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -354,7 +354,7 @@ private void cancelJob() { @Override public void triggered(SchedulerEngine.Event event) { if (event.jobName().equals(XPackField.INDEX_LIFECYCLE)) { - logger.trace("job triggered: " + event.jobName() + ", " + event.scheduledTime() + ", " + event.triggeredTime()); + logger.trace("job triggered: {}, {}, {}", event.jobName(), event.scheduledTime(), event.triggeredTime()); triggerPolicies(clusterService.state(), false); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java index 4567e291aebed..296623b54509f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistry.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -42,7 +43,6 @@ import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -269,9 +269,8 @@ public Set parseStepKeysFromPhase(String policy, String currentPha return parseStepsFromPhase(policy, currentPhase, phaseDefNonNull).stream().map(Step::getKey).collect(Collectors.toSet()); } catch (IOException e) { logger.trace( - () -> String.format( - Locale.ROOT, - "unable to parse steps for policy [{}], phase [{}], and phase definition [{}]", + () -> Strings.format( + "unable to parse steps for policy [%s], phase [%s], and phase definition [%s]", policy, currentPhase, phaseDef diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java index 48cf84ed7a6a4..494f0ee444236 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java @@ -145,7 +145,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - rerouteService.reroute("cluster migrated to data tiers routing", Priority.NORMAL, new ActionListener() { + rerouteService.reroute("cluster migrated to data tiers routing", Priority.NORMAL, new ActionListener<>() { @Override 
             public void onResponse(Void ignored) {}
diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
index b8af3e8e0daa2..549b321be8182 100644
--- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java
@@ -58,7 +58,7 @@ public class ILMHistoryStore implements Closeable {

     public static final String ILM_HISTORY_DATA_STREAM = "ilm-history-" + INDEX_TEMPLATE_VERSION;

-    private static int ILM_HISTORY_BULK_SIZE = StrictMath.toIntExact(
+    private static final int ILM_HISTORY_BULK_SIZE = StrictMath.toIntExact(
         ByteSizeValue.parseBytesSizeValue(
             System.getProperty("es.indices.lifecycle.history.bulk.size", "50MB"),
             "es.indices.lifecycle.history.bulk.size"
diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
index 37d586240eb7a..49aa0a65a5704 100644
--- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
+++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java
@@ -72,7 +72,7 @@ public class IndexLifecycleTransitionTests extends ESTestCase {
     public void testMoveClusterStateToNextStep() {
         String indexName = "my_index";
         LifecyclePolicy policy = randomValueOtherThanMany(
-            p -> p.getPhases().size() == 0,
+            p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")
         );
         Phase nextPhase = policy.getPhases()
@@ -125,7 +125,7 @@ public void testMoveClusterStateToNextStep() {
     public void testMoveClusterStateToNextStepSamePhase() {
         String indexName = "my_index";
         LifecyclePolicy policy = randomValueOtherThanMany(
-            p -> p.getPhases().size() == 0,
+            p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")
         );
         List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
@@ -176,7 +176,7 @@ public void testMoveClusterStateToNextStepSamePhase() {
     public void testMoveClusterStateToNextStepSameAction() {
         String indexName = "my_index";
         LifecyclePolicy policy = randomValueOtherThanMany(
-            p -> p.getPhases().size() == 0,
+            p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy("policy")
         );
         List<LifecyclePolicyMetadata> policyMetadatas = Collections.singletonList(
@@ -228,7 +228,7 @@ public void testSuccessfulValidatedMoveClusterStateToNextStep() {
         String indexName = "my_index";
         String policyName = "my_policy";
         LifecyclePolicy policy = randomValueOtherThanMany(
-            p -> p.getPhases().size() == 0,
+            p -> p.getPhases().isEmpty(),
             () -> LifecyclePolicyTests.randomTestLifecyclePolicy(policyName)
         );
         Phase nextPhase = policy.getPhases()
@@ -1436,6 +1436,6 @@ private void assertClusterStateStepInfo(
         assertEquals(expectedstepInfoValue, newLifecycleState.stepInfo());
         assertEquals(oldLifecycleState.phaseTime(), newLifecycleState.phaseTime());
         assertEquals(oldLifecycleState.actionTime(), newLifecycleState.actionTime());
-        assertEquals(newLifecycleState.stepTime(), newLifecycleState.stepTime());
+        assertEquals(oldLifecycleState.stepTime(), newLifecycleState.stepTime());
     }
 }
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java
index f2a6e33f7cb05..ca81a03fc5630 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/GetMigrationReindexStatusTransportAction.java
@@ -88,7 +88,7 @@ void getRunningTaskFromNode(String persistentTaskId, ActionListener<Response> li
             listener.onFailure(
                 new ResourceNotFoundException(
                     Strings.format(
-                        "Persistent task [{}] is supposed to be running on node [{}], " + "but the task is not found on that node",
+                        "Persistent task [%s] is supposed to be running on node [%s], but the task is not found on that node",
                         persistentTaskId,
                         clusterService.localNode().getId()
                     )
@@ -106,7 +106,7 @@ private void runOnNodeWithTaskIfPossible(Task thisTask, Request request, String
             listener.onFailure(
                 new ResourceNotFoundException(
                     Strings.format(
-                        "Persistent task [{}] is supposed to be running on node [{}], but that node is not part of the cluster",
+                        "Persistent task [%s] is supposed to be running on node [%s], but that node is not part of the cluster",
                         request.getIndex(),
                         nodeId
                     )
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
index 9e4cbb1082215..39d4170f6e712 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamAction.java
@@ -13,10 +13,14 @@
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.features.NodeFeature;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexVersion;
+import org.elasticsearch.index.IndexVersions;
 import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContent;
@@ -39,10 +43,24 @@ public class ReindexDataStreamAction extends ActionType<ReindexDataStreamAction
+    public static final IndexVersion MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE = IndexVersions.UPGRADE_TO_LUCENE_10_0_0;
+
+    public static Predicate<Index> getOldIndexVersionPredicate(Metadata metadata) {
+        return index -> metadata.index(index).getCreationVersion().onOrBefore(MINIMUM_WRITEABLE_VERSION_AFTER_UPGRADE);
+    }
+
     public enum Mode {
         UPGRADE
     }
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
index 95a078690a055..f011c429ce79c 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/action/ReindexDataStreamTransportAction.java
@@ -27,6 +27,7 @@
 import org.elasticsearch.xpack.migrate.task.ReindexDataStreamTaskParams;

 import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.TASK_ID_PREFIX;
+import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate;

 /*
 * This transport action creates a new persistent task for reindexing the source data stream given in the request. On successful creation
@@ -67,10 +68,7 @@ protected void doExecute(Task task, ReindexDataStreamRequest request, ActionList
             return;
         }
         int totalIndices = dataStream.getIndices().size();
-        int totalIndicesToBeUpgraded = (int) dataStream.getIndices()
-            .stream()
-            .filter(index -> metadata.index(index).getCreationVersion().isLegacyIndexVersion())
-            .count();
+        int totalIndicesToBeUpgraded = (int) dataStream.getIndices().stream().filter(getOldIndexVersionPredicate(metadata)).count();
         ReindexDataStreamTaskParams params = new ReindexDataStreamTaskParams(
             sourceDataStreamName,
             transportService.getThreadPool().absoluteTimeInMillis(),
diff --git a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java
index fc471cfa89f26..7ec5014b9edff 100644
--- a/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java
+++ b/x-pack/plugin/migrate/src/main/java/org/elasticsearch/xpack/migrate/task/ReindexDataStreamPersistentTaskExecutor.java
@@ -24,6 +24,8 @@
 import java.util.List;
 import java.util.Map;

+import static org.elasticsearch.xpack.migrate.action.ReindexDataStreamAction.getOldIndexVersionPredicate;
+
 public class ReindexDataStreamPersistentTaskExecutor extends PersistentTasksExecutor<ReindexDataStreamTaskParams> {
     private static final TimeValue TASK_KEEP_ALIVE_TIME = TimeValue.timeValueDays(1);
     private final Client client;
@@ -72,7 +74,7 @@ protected void nodeOperation(AllocatedPersistentTask task, ReindexDataStreamTask
         if (dataStreamInfos.size() == 1) {
             List<Index> indices = dataStreamInfos.getFirst().getDataStream().getIndices();
             List<Index> indicesToBeReindexed = indices.stream()
-                .filter(index -> clusterService.state().getMetadata().index(index).getCreationVersion().isLegacyIndexVersion())
+                .filter(getOldIndexVersionPredicate(clusterService.state().metadata()))
                 .toList();
             reindexDataStreamTask.setPendingIndicesCount(indicesToBeReindexed.size());
             for (Index index : indicesToBeReindexed) {