From 895cb7e30dfdb8992adb0fd64b3acbebbf55bad6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 22 Jun 2021 18:59:00 -0400 Subject: [PATCH 01/29] Create a `time_series_mode` option for indices Enabling this setting will put the index into a mode optimized for time series data, grouping documents whose fields are annotated with `dimension`. Because time series data is usually write-once we also disable your ability to update or delete documents. We have big plans for things we can do with data organized in this way, but for now the primary advantage of enabling `time_series_mode` is that it shrinks the index on disk. We did some local tests with k8s monitoring data in ECS format. Here are some figures for a single sharded index without replicas, force merged to a single segment: uncompressed JSON: 150GB defaults: 60GB best_compression: 39GB time_series_mode: 33GB best_compression and time_series_mode: 22GB ((NOCOMMIT: get the size measurements after running the speed tests)) So the compression you get is similar to best_compression and it can operate "on top of" best compression to further reduce the index size. The price is, like we mentioned above, disabling update and delete of documents. In addition to that, there is a performance cost on ingest and on merge. Loading the data from above took: best_compression: 2.6 hours best_compression sort on all dimensions: 0.8 hours time_series_mode: (measuring) best_compression and time_series_mode: (measuring) In addition to the space savings this creates an unsearchable `_tsid` field which functions as a shorthand for aggregating on all the dimensions. So you can run this: ``` POST localhost:9200/test/_search?pretty { "size": 0, "aggs": { "tsid": { "terms": { "field": "_tsid" }, "aggs": { "max_n": { "max": { "field": "n" } } } } } } ``` and the `key` field in the json of the terms bucket will contain all of the dimensions. 
Something like this: ``` "aggregations" : { "tsid" : { "buckets" : [ {"key":{"dim1":"a","dim2":"a"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"a","dim2":"b"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"a","dim2":"c"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"a","dim2":"d"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"b","dim2":"a"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"b","dim2":"b"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"b","dim2":"c"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"b","dim2":"d"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"c","dim2":"a"},"doc_count":6,"max_n":{"value":6}}, {"key":{"dim1":"c","dim2":"b"},"doc_count":6,"max_n":{"value":6}} ] } } ``` --- .../xcontent/support/MapXContentParser.java | 2 +- .../xcontent/MapXContentParserTests.java | 39 +- .../20_tsdb_consistency.yml | 131 ++++ .../rest-api-spec/test/tsdb/10_search.yml | 326 ++++++++++ .../rest-api-spec/test/tsdb/20_bad_config.yml | 139 +++++ .../test/tsdb/30_unsupported_operations.yml | 152 +++++ .../test/tsdb/40_invalid_indexing.yml | 209 +++++++ .../test/tsdb/50_dimension_types.yml | 202 +++++++ .../action/bulk/TransportBulkAction.java | 29 +- .../action/index/IndexRequest.java | 62 +- .../action/update/TransportUpdateAction.java | 7 +- .../cluster/metadata/IndexMetadata.java | 31 +- .../cluster/metadata/Metadata.java | 3 +- .../common/settings/IndexScopedSettings.java | 14 +- .../elasticsearch/index/IndexSettings.java | 40 ++ .../elasticsearch/index/IndexSortConfig.java | 25 +- .../index/TimeSeriesIdGenerator.java | 329 ++++++++++ .../index/mapper/DocumentMapper.java | 11 +- .../index/mapper/IpFieldMapper.java | 80 ++- .../index/mapper/KeywordFieldMapper.java | 57 +- .../elasticsearch/index/mapper/Mapper.java | 4 + .../index/mapper/MapperService.java | 2 +- .../elasticsearch/index/mapper/Mapping.java | 5 + .../index/mapper/NumberFieldMapper.java | 93 ++- .../index/mapper/ObjectMapper.java | 17 + 
.../index/mapper/RoutingFieldMapper.java | 5 + .../index/mapper/TimeSeriesIdFieldMapper.java | 121 ++++ .../query/CoordinatorRewriteContext.java | 1 + .../elasticsearch/indices/IndicesModule.java | 18 +- .../elasticsearch/indices/IndicesService.java | 32 +- .../indices/TimeSeriesIdGeneratorService.java | 386 ++++++++++++ .../elasticsearch/search/DocValueFormat.java | 33 + .../elasticsearch/search/SearchModule.java | 1 + .../bucket/terms/StringTerms.java | 3 + ...ActionIndicesThatCannotBeCreatedTests.java | 4 +- .../bulk/TransportBulkActionIngestTests.java | 9 +- .../action/bulk/TransportBulkActionTests.java | 53 +- .../bulk/TransportBulkActionTookTests.java | 1 + .../index/IndexSortSettingsTests.java | 76 ++- .../index/TimeSeriesIdGeneratorTests.java | 565 ++++++++++++++++++ .../index/mapper/DocumentMapperTests.java | 68 ++- .../indices/IndicesModuleTests.java | 17 +- .../TimeSeriesIdGeneratorServiceTests.java | 220 +++++++ .../snapshots/SnapshotResiliencyTests.java | 3 +- 44 files changed, 3497 insertions(+), 128 deletions(-) create mode 100644 qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml create mode 100644 server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java create mode 100644 server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java create 
mode 100644 server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java create mode 100644 server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java index b23cc553fdf42..fa967d8663e85 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/MapXContentParser.java @@ -163,7 +163,7 @@ public Object objectBytes() throws IOException { @Override public boolean hasTextCharacters() { - throw new UnsupportedOperationException("use text() instead"); + return false; } @Override diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java index 49071b968f29a..afae071adc83e 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/MapXContentParserTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.test.ESTestCase; @@ -57,19 +58,51 @@ public void testSimpleMap() throws IOException { }); } - public void testRandomObject() throws IOException { compareTokens(builder -> generateRandomObject(builder, randomIntBetween(0, 10))); } - public void compareTokens(CheckedConsumer consumer) throws IOException { + /** + * Assert that {@link XContentParser#hasTextCharacters()} returns false because + * we don't support {@link 
XContentParser#textCharacters()}. + */ + public void testHasTextCharacters() throws IOException { + assertFalse( + new MapXContentParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + Map.of("a", "b"), + randomFrom(XContentType.values()) + ).hasTextCharacters() + ); + } + + public void testCopyCurrentStructure() throws IOException { + try ( + XContentParser parser = new MapXContentParser( + xContentRegistry(), + LoggingDeprecationHandler.INSTANCE, + Map.of("a", "b"), + randomFrom(XContentType.values()) + ) + ) { + try ( + XContentBuilder builder = JsonXContent.contentBuilder().copyCurrentStructure(parser); + XContentParser copied = createParser(builder) + ) { + assertEquals(copied.map(), Map.of("a", "b")); + } + } + } + + private void compareTokens(CheckedConsumer consumer) throws IOException { for (XContentType xContentType : EnumSet.allOf(XContentType.class)) { logger.info("--> testing with xcontent type: {}", xContentType); compareTokens(consumer, xContentType); } } - public void compareTokens(CheckedConsumer consumer, XContentType xContentType) throws IOException { + private void compareTokens(CheckedConsumer consumer, XContentType xContentType) throws IOException { try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { consumer.accept(builder); final Map map; diff --git a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml new file mode 100644 index 0000000000000..0fda1a234e0d2 --- /dev/null +++ b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml @@ -0,0 +1,131 @@ +# Test the time_series_mode properly groups by _tsid. If we could put this in +# rest-api-spec we would, but it requires painless. 
+ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", 
"uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + + +--- +"each shard has unique _tsids": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + check: + scripted_metric: + init_script: "state.timeSeries = new HashSet()" + map_script: "state.timeSeries.add(doc._tsid.value)" + combine_script: "return state.timeSeries" + reduce_script: | + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + " appeared in two shards"); + } + } + } + return timeSeries; + + - match: {hits.total.value: 8} + - length: {aggregations.check.value: 2} + + +--- +"segments are sorted": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + check: + scripted_metric: + init_script: "state.timeSeries = new HashMap()" + map_script: | + String ts = doc._tsid.value; + long time = doc["@timestamp"].value.toInstant().toEpochMilli(); + Long prevTime = state.timeSeries.put(ts, time); + if (prevTime == null) { + return; // First time seeing + } + if (prevTime.longValue() > time) { + throw new IllegalArgumentException(ts + " appeared out of order. 
" + prevTime + " was before " + time); + } + combine_script: "return state.timeSeries" + reduce_script: | + Map timeSeries = new TreeMap(); + for (s in states) { + for (ts in s.entrySet()) { + Long prev = timeSeries.put(ts.key, ts.value); + if (prev != null) { + throw new IllegalArgumentException(ts.key + " appeared in two shards"); + } + } + } + return timeSeries; + + - match: {hits.total.value: 8} + - length: {aggregations.check.value: 2} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml new file mode 100644 index 0000000000000..0ef25302eacc6 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml @@ -0,0 +1,326 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": 
"2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +"query a dimension": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + query: + match: + k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 + + - match: {hits.total.value: 4} + +--- +"query a metric": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + query: + range: + k8s.pod.network.tx: + gt: 2006223737 + + - match: {hits.total.value: 1} + +--- +"query tsid fails": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[_tsid\] is not searchable/ + search: + index: test + body: + query: + term: + _tsid: wont't work + +--- +"fetch a dimension": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be 
backported to 7.15.0 + + - do: + search: + index: test + body: + fields: + - field: k8s.pod.uid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields.k8s\.pod\.uid: [947e4ced-1786-4e53-9e0c-5c447e959507]} + +--- +"fetch a metric": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + fields: + - field: k8s.pod.network.tx + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields.k8s\.pod\.network\.tx: [2012916202]} + +--- +"fetch a tag": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + fields: + - field: k8s.pod.ip + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields.k8s\.pod\.ip: ['10.10.55.2']} + +--- +"fetch the tsid": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + +--- +"don't fetch tsid by default": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - is_false: hits.hits.0.fields._tsid + +--- +"aggregate a dimension": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: 
test + body: + size: 0 + aggs: + uids: + terms: + field: k8s.pod.uid + + - match: {hits.total.value: 8} + - match: {aggregations.uids.buckets.0.key: 947e4ced-1786-4e53-9e0c-5c447e959507} + - match: {aggregations.uids.buckets.0.doc_count: 4} + - match: {aggregations.uids.buckets.1.key: df3145b3-0563-4d3b-a0f7-897eb2876ea9} + - match: {aggregations.uids.buckets.1.doc_count: 4} + +--- +"aggregate a metric": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + uids: + terms: + field: k8s.pod.uid + aggs: + max_rx: + max: + field: k8s.pod.network.rx + + - match: {hits.total.value: 8} + - match: {aggregations.uids.buckets.0.key: 947e4ced-1786-4e53-9e0c-5c447e959507} + - match: {aggregations.uids.buckets.0.doc_count: 4} + - match: {aggregations.uids.buckets.0.max_rx.value: 803685721} + - match: {aggregations.uids.buckets.1.key: df3145b3-0563-4d3b-a0f7-897eb2876ea9} + - match: {aggregations.uids.buckets.1.doc_count: 4} + - match: {aggregations.uids.buckets.1.max_rx.value: 530605511} + +--- +"aggregate a tag": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + ips: + terms: + field: k8s.pod.ip + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.ips.buckets.0.key: 10.10.55.1} + - match: {aggregations.ips.buckets.0.doc_count: 3} + - match: {aggregations.ips.buckets.1.key: 10.10.55.2} + - match: {aggregations.ips.buckets.1.doc_count: 1} + - match: {aggregations.ips.buckets.2.key: 10.10.55.3} + - match: {aggregations.ips.buckets.2.doc_count: 4} + +--- +"aggregate the tsid": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: 
{aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + +--- +"field capabilities": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + field_caps: + index: test + fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, _tsid] + + - match: {fields.k8s\.pod\.uid.keyword.searchable: true} + - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - is_false: fields.k8s\.pod\.uid.keyword.indices + - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices + - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices + - match: {fields.k8s\.pod\.network\.rx.long.searchable: true} + - match: {fields.k8s\.pod\.network\.rx.long.aggregatable: true} + - is_false: fields.k8s\.pod\.network\.rx.long.indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_searchable_indices + - is_false: fields.k8s\.pod\.network\.rx.long.non_aggregatable_indices + - match: {fields.k8s\.pod\.ip.ip.searchable: true} + - match: {fields.k8s\.pod\.ip.ip.aggregatable: true} + - is_false: fields.k8s\.pod\.ip.ip.indices + - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices + - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices + - match: {fields._tsid._tsid.searchable: false} + - match: {fields._tsid._tsid.aggregatable: true} + - is_false: fields._tsid._tsid.indices + - is_false: fields._tsid._tsid.non_searchable_indices + - is_false: fields._tsid._tsid.non_aggregatable_indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml new file mode 100644 index 0000000000000..f94d7f2506f20 --- /dev/null +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml @@ -0,0 +1,139 @@ +--- +"no timestamp": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /unknown index sort field:\[@timestamp\] required by \[index\.time_series_mode\]/ + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + metricset: + type: keyword + dimension: true + +--- +"no dimenions": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /Index configured with \[index.time_series_mode\] requires at least one field configured with \[dimension:true\]/ + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + +--- +"dimension with ignore_above": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /Field \[ignore_above\] cannot be set in conjunction with field \[dimension\]/ + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + ignore_above: 1024 + +--- +"dimension with normalizer": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /Field \[normalizer\] cannot be set in conjunction with field \[dimension\]/ + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + normalizer: lowercase + +--- +"dimension with super long name": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /Dimension name must be less than \[512\] bytes but \[.+\..+\..+\..+\..+\] was \[524\]/ + indices.create: + 
index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + superduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperlong: + properties: + superduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperlong: + properties: + superduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperlong: + properties: + superduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperlong: + properties: + superduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperduperlong: + type: keyword + dimension: true + +--- +"provide sort in time series mode": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /Can't set \[index\.sort\.field\] in time series mode/ + indices.create: + index: test + body: + settings: + index: + sort: + field: "@timestamp" + order: desc + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml new file mode 100644 index 0000000000000..b45539d77dd7f --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml @@ -0,0 +1,152 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + 
refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +"delete": + - skip: + version: " - 7.99.99" + reason: introduced in 
8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[DELETE\] is not supported because the destination index \[test\] is in time series mode/ + delete: + index: test + id: 1 + +--- +"delete over _bulk": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + bulk: + index: test + body: + - '{"delete": {"_id": 1}}' + - '{"delete": {"_id": 2}}' + - match: {items.0.delete.error.reason: "[DELETE] is not supported because the destination index [test] is in time series mode"} + +--- +"update": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + # We fail even though the document isn't found. + - do: + catch: /\[UPDATE\] is not supported because the destination index \[test\] is in time series mode/ + update: + index: test + id: 1 + body: + doc: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: "pod" + k8s: + pod: + name: "cat" + uid: "947e4ced-1786-4e53-9e0c-5c447e959507" + ip: "10.10.55.1" + network: + tx: 2001818691 + rx: 802133794 + +--- +"noop update": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: test + body: + fields: + - field: k8s.pod.uid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - do: + catch: /\[UPDATE\] is not supported because the destination index \[test\] is in time series mode/ + update: + index: test + id: $body.hits.hits.0._id + body: + doc: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: "pod" + k8s: + pod: + name: "cat" + uid: "947e4ced-1786-4e53-9e0c-5c447e959507" + ip: "10.10.55.1" + network: + tx: 2001818691 + rx: 802133794 + +--- +"update over _bulk": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + bulk: + index: test + body: + - '{"update": {"_id": 1}}' + - '{"doc":{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", 
"uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}}' + - match: {items.0.update.error.reason: "[UPDATE] is not supported because the destination index [test] is in time series mode"} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml new file mode 100644 index 0000000000000..bbecab2f2b9a8 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -0,0 +1,209 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + +--- +"index without dimensions": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: "/Error building time series id: Document must contain one of the dimensions \\[k8s.pod.uid, metricset\\]/" + index: + index: test + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + +--- +"index over bulk without dimensions": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + bulk: + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z"}' + - match: {items.0.index.error.reason: "Error building time series id: Document must contain one of the dimensions [k8s.pod.uid, metricset]"} + +--- +"index with invalid ip": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: bad_ip + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + 
type: date + ip: + type: ip + dimension: true + + - do: + catch: "/Error building time series id: error extracting dimension \\[ip\\]: 'not an ip' is not an IP string literal./" + index: + index: bad_ip + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + ip: not an ip + +--- +"index with invalid number": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: bad_ip + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + n: + type: long + dimension: true + + - do: + catch: "/Error building time series id: error extracting dimension \\[n\\]: For input string: \"not a number\"/" + index: + index: bad_ip + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + n: not a number + + +--- +"index with routing": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /routing cannot be set because the destination index \[test\] is in time series mode/ + index: + index: test + routing: cat + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: "pod" + k8s: + pod: + name: "cat" + uid: "947e4ced-1786-4e53-9e0c-5c447e959507" + ip: "10.10.55.1" + network: + tx: 2001818691 + rx: 802133794 + +--- +"index over bulk with routing": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + bulk: + index: test + body: + - '{"index": {"routing": "cat"}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - match: {items.0.index.error.reason: "routing cannot be set because the destination index [test] is in time series mode"} + +--- +"index into routed alias": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.put_alias: + index: test + 
name: alias_with_routing + body: + index_routing: ir + + - do: + catch: /alias routing incompatible the destination index \[test\] because it is in time series mode/ + index: + index: alias_with_routing + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: "pod" + k8s: + pod: + name: "cat" + uid: "947e4ced-1786-4e53-9e0c-5c447e959507" + ip: "10.10.55.1" + network: + tx: 2001818691 + rx: 802133794 + +--- +"index over bulk into routed alias": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.put_alias: + index: test + name: alias_with_routing + body: + index_routing: ir + + - do: + bulk: + index: alias_with_routing + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - match: {items.0.index.error.reason: "alias routing incompatible the destination index [test] because it is in time series mode"} + +# TODO should indexing with an id fail too? +# TODO should indexing without a @timestamp fail too? +# TODO should an alias with search_routing fail? 
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml new file mode 100644 index 0000000000000..ab0e91da5fd1d --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml @@ -0,0 +1,202 @@ +keyword dimension: + - skip: + features: close_to + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + uid: + type: keyword + dimension: true + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "uid": "947e4ced-1786-4e53-9e0c-5c447e959507", "voltage": 7.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "uid": "947e4ced-1786-4e53-9e0c-5c447e959507", "voltage": 7.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "uid": "947e4ced-1786-4e53-9e0c-5c447e959507", "voltage": 7.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "947e4ced-1786-4e53-9e0c-5c447e959507", "voltage": 7.3}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.3}' + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 
8} + - match: {aggregations.tsids.buckets.0.key: {uid: 947e4ced-1786-4e53-9e0c-5c447e959507}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} + +--- +long dimension: + - skip: + features: close_to + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + id: + type: long + dimension: true + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "id": 1, "voltage": 7.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "id": "1", "voltage": 7.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "id": 1.0, "voltage": 7.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "id": "001", "voltage": 7.3}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "id": 2, "voltage": 3.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "id": 2, "voltage": 3.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "id": 2, "voltage": 3.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "id": 2, "voltage": 3.3}' + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {id: 1}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 
}} + - match: {aggregations.tsids.buckets.1.key: {id: 2}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} + +--- +ip dimension: + - skip: + features: close_to + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + ip: + type: ip + dimension: true + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "ip": "10.10.1.1", "voltage": 7.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "ip": "10.10.1.1", "voltage": 7.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "ip": "10.10.1.1", "voltage": 7.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "ip": "::ffff:10.10.1.1", "voltage": 7.3}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "ip": "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "voltage": 3.2}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:34.467Z", "ip": "2001:0db8:85a3:0:0:8a2e:0370:7334", "voltage": 3.6}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:44.467Z", "ip": "2001:0db8:85a3::8a2e:0370:7334", "voltage": 3.1}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:54.467Z", "ip": "2001:0db8:85a3::8a2e:0370:7334", "voltage": 3.3}' + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {ip: "10.10.1.1"}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {ip: "2001:db8:85a3::8a2e:370:7334"}} + - 
match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index c0a9699373c68..ae0bf8a935ed6 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -50,10 +50,12 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexClosedException; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.node.NodeClosedException; @@ -74,6 +76,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicIntegerArray; +import java.util.function.Function; import java.util.function.LongSupplier; import java.util.stream.Collectors; @@ -99,20 +102,22 @@ public class TransportBulkAction extends HandledTransportAction timeSeriesIdGeneratorLookup; @Inject public TransportBulkAction(ThreadPool threadPool, TransportService transportService, ClusterService clusterService, IngestService ingestService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - IndexingPressure indexingPressure, SystemIndices systemIndices) { + IndexingPressure indexingPressure, SystemIndices systemIndices, IndicesService indicesService) { this(threadPool, transportService, clusterService, 
ingestService, client, actionFilters, - indexNameExpressionResolver, indexingPressure, systemIndices, System::nanoTime); + indexNameExpressionResolver, indexingPressure, systemIndices, indicesService.getTimeSeriesGeneratorLookup(), System::nanoTime); } public TransportBulkAction(ThreadPool threadPool, TransportService transportService, ClusterService clusterService, IngestService ingestService, NodeClient client, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, IndexingPressure indexingPressure, SystemIndices systemIndices, + Function timeSeriesIdGeneratorLookup, LongSupplier relativeTimeProvider) { super(BulkAction.NAME, transportService, actionFilters, BulkRequest::new, ThreadPool.Names.SAME); Objects.requireNonNull(relativeTimeProvider); @@ -125,6 +130,7 @@ public TransportBulkAction(ThreadPool threadPool, TransportService transportServ this.indexNameExpressionResolver = indexNameExpressionResolver; this.indexingPressure = indexingPressure; this.systemIndices = systemIndices; + this.timeSeriesIdGeneratorLookup = timeSeriesIdGeneratorLookup; clusterService.addStateApplier(this.ingestForwarder); } @@ -341,6 +347,21 @@ static void prohibitCustomRoutingOnDataStream(DocWriteRequest writeRequest, M } } + static void prohibitInTimeSeriesMode(DocWriteRequest writeRequest, IndexAbstraction abstraction) { + if (abstraction == null || abstraction.getWriteIndex() == null) { + return; + } + if (abstraction.getWriteIndex().inTimeSeriesMode()) { + throw new IllegalArgumentException( + "[" + + writeRequest.opType() + + "] is not supported because the destination index [" + + abstraction.getName() + + "] is in time series mode" + ); + } + } + boolean isOnlySystem(BulkRequest request, SortedMap indicesLookup, SystemIndices systemIndices) { return request.getIndices().stream().allMatch(indexName -> isSystemIndex(indicesLookup, systemIndices, indexName)); } @@ -448,14 +469,16 @@ protected void doRun() { final IndexMetadata indexMetadata = 
metadata.index(concreteIndex); MappingMetadata mappingMd = indexMetadata.mapping(); Version indexCreated = indexMetadata.getCreationVersion(); - indexRequest.resolveRouting(metadata); + indexRequest.resolveRouting(metadata, indexAbstraction, timeSeriesIdGeneratorLookup); indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); break; case UPDATE: + prohibitInTimeSeriesMode(docWriteRequest, indexAbstraction); TransportUpdateAction.resolveAndValidateRouting(metadata, concreteIndex.getName(), (UpdateRequest) docWriteRequest); break; case DELETE: + prohibitInTimeSeriesMode(docWriteRequest, indexAbstraction); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); // check if routing is required, if so, throw error if routing wasn't specified if (docWriteRequest.routing() == null && metadata.routingRequired(concreteIndex.getName())) { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b74d6676a0eb6..b71f337dd8c9b 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -19,9 +19,10 @@ import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.metadata.IndexAbstraction; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -29,19 +30,26 @@ import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.function.Function; import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -605,8 +613,56 @@ public void process(Version indexCreatedVersion, @Nullable MappingMetadata mappi } /* resolve the routing if needed */ - public void resolveRouting(Metadata metadata) { - routing(metadata.resolveWriteIndexRouting(routing, index)); + public void resolveRouting( + Metadata metadata, + IndexAbstraction abstraction, + Function timeSeriesGeneratorLookup + ) { + // TODO clean this up once we tsid is its own field + String routingFromAliasOrRequest = metadata.resolveWriteIndexRouting(routing(), index); + boolean inTimeSeriesMode = abstraction == null || abstraction.getWriteIndex() == null + ? 
false + : abstraction.getWriteIndex().inTimeSeriesMode(); + if (inTimeSeriesMode) { + if (routing() != null) { + throw new IllegalArgumentException( + "routing cannot be set because the destination index [" + abstraction.getName() + "] is in time series mode" + ); + } + if (routingFromAliasOrRequest != null) { + throw new IllegalArgumentException( + "alias routing incompatible the destination index [" + abstraction.getName() + "] because it is in time series mode" + ); + } + routing(routingFromTimeSeries(abstraction, timeSeriesGeneratorLookup)); + } else { + // Update the routing on the request with information from the alias. + routing(routingFromAliasOrRequest); + } + } + + private String routingFromTimeSeries( + IndexAbstraction abstraction, + Function timeSeriesGeneratorLookup + ) { + if (abstraction == null || abstraction.getWriteIndex() == null) { + return null; + } + TimeSeriesIdGenerator gen = timeSeriesGeneratorLookup.apply(abstraction.getWriteIndex()); + if (gen == null) { + return null; + } + try { + try ( + XContentParser parser = contentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, source.streamInput()) + ) { + // TODO switch to native BytesRef over the wire + return Base64.getEncoder().encodeToString(BytesReference.toBytes(gen.generate(parser))); + } + } catch (IOException | IllegalArgumentException e) { + throw new IllegalArgumentException("Error building time series id: " + e.getMessage(), e); + } } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 5383f2804a101..33d168941c57d 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -32,12 +32,12 @@ import 
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -176,6 +176,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< protected void shardOperation(final UpdateRequest request, final ActionListener listener, final int retryCount) { final ShardId shardId = request.getShardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + if (indexService.getMetadata().inTimeSeriesMode()) { + throw new IllegalArgumentException( + "[UPDATE] is not supported because the destination index [" + shardId.getIndexName() + "] is in time series mode" + ); + } final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis); switch (result.getResponseResult()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 1dd640d0a8d35..49c555f1cb556 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -12,6 +12,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor; import 
com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.Assertions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; @@ -23,14 +24,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; @@ -41,8 +34,17 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexLongFieldRange; @@ -389,6 +391,7 @@ public static 
APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final IndexLongFieldRange timestampRange; + private final boolean timeSeriesMode; private IndexMetadata( final Index index, @@ -415,7 +418,8 @@ private IndexMetadata( final ActiveShardCount waitForActiveShards, final ImmutableOpenMap rolloverInfos, final boolean isSystem, - final IndexLongFieldRange timestampRange) { + final IndexLongFieldRange timestampRange, + final boolean inTimeSeriesMode) { this.index = index; this.version = version; @@ -448,6 +452,7 @@ private IndexMetadata( this.rolloverInfos = rolloverInfos; this.isSystem = isSystem; this.timestampRange = timestampRange; + this.timeSeriesMode = inTimeSeriesMode; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1281,6 +1286,7 @@ public IndexMetadata build() { } final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); + final boolean inTimeSeriesMode = IndexSettings.TIME_SERIES_MODE.get(settings); return new IndexMetadata( new Index(index, uuid), @@ -1307,7 +1313,8 @@ public IndexMetadata build() { waitForActiveShards, rolloverInfos.build(), isSystem, - timestampRange); + timestampRange, + inTimeSeriesMode); } public static void toXContent(IndexMetadata indexMetadata, XContentBuilder builder, ToXContent.Params params) throws IOException { @@ -1639,6 +1646,10 @@ public int getRoutingFactor() { return routingFactor; } + public boolean inTimeSeriesMode() { + return timeSeriesMode; + } + /** * Returns the source shard ID to split the given target shard off * @param shardId the id of the target shard to split into diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index cedf6375532b7..bc4b4b869b7b8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -180,7 +180,7 @@ public interface NonRestorableCustom extends Custom { private final SortedMap indicesLookup; - Metadata(String clusterUUID, boolean clusterUUIDCommitted, long version, CoordinationMetadata coordinationMetadata, + private Metadata(String clusterUUID, boolean clusterUUIDCommitted, long version, CoordinationMetadata coordinationMetadata, Settings transientSettings, Settings persistentSettings, DiffableStringMap hashesOfConsistentSettings, ImmutableOpenMap indices, ImmutableOpenMap templates, ImmutableOpenMap customs, String[] allIndices, String[] visibleIndices, String[] allOpenIndices, @@ -558,6 +558,7 @@ public String resolveWriteIndexRouting(@Nullable String routing, String aliasOrI return routing; } + // TODO this is the same sort of code we have in bulk action already. we should share IndexAbstraction result = getIndicesLookup().get(aliasOrIndex); if (result == null || result.getType() != IndexAbstraction.Type.ALIAS) { return routing; diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 8e0f5de3d9e64..67b99475be6c7 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -36,6 +36,7 @@ import org.elasticsearch.indices.ShardLimitValidator; import java.util.Collections; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.Predicate; @@ -48,7 +49,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { public static final Predicate INDEX_SETTINGS_KEY_PREDICATE = (s) -> s.startsWith(IndexMetadata.INDEX_SETTING_PREFIX); - public static final Set> BUILT_IN_INDEX_SETTINGS = Set.of( + private static final Set> ALWAYS_ENABLED_BUILT_IN_INDEX_SETTINGS = Set.of( 
MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY, MergeSchedulerConfig.AUTO_THROTTLE_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, @@ -176,6 +177,17 @@ public final class IndexScopedSettings extends AbstractScopedSettings { Property.IndexScope), // this allows similarity settings to be passed Setting.groupSetting("index.analysis.", Property.IndexScope)); // this allows analysis settings to be passed + public static final Set> BUILT_IN_INDEX_SETTINGS = builtInIndexSettings(); + + private static Set> builtInIndexSettings() { + if (false == IndexSettings.isTimeSeriesModeEnabled()) { + return ALWAYS_ENABLED_BUILT_IN_INDEX_SETTINGS; + } + Set> result = new HashSet<>(ALWAYS_ENABLED_BUILT_IN_INDEX_SETTINGS); + result.add(IndexSettings.TIME_SERIES_MODE); + return Set.copyOf(result); + } + public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); public IndexScopedSettings(Settings settings, Set> settingsSet) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index c36db7e5a385d..82846e2ec3a57 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Strings; import org.apache.lucene.index.MergePolicy; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.logging.Loggers; @@ -19,7 +20,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import 
org.elasticsearch.index.translog.Translog; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.node.Node; @@ -317,6 +320,29 @@ public final class IndexSettings { public static final Setting FILE_BASED_RECOVERY_THRESHOLD_SETTING = Setting.doubleSetting("index.recovery.file_based_threshold", 0.1d, 0.0d, Setting.Property.IndexScope); + private static final Boolean TIME_SERIES_MODE_FEATURE_FLAG_REGISTERED; + + static { + final String property = System.getProperty("es.time_series_mode_feature_flag_registered"); + if (Build.CURRENT.isSnapshot() && property != null) { + throw new IllegalArgumentException("es.time_series_mode_feature_flag_registered is only supported in non-snapshot builds"); + } + TIME_SERIES_MODE_FEATURE_FLAG_REGISTERED = Booleans.parseBoolean(property, null); + } + + public static boolean isTimeSeriesModeEnabled() { + return Build.CURRENT.isSnapshot() || (TIME_SERIES_MODE_FEATURE_FLAG_REGISTERED != null && TIME_SERIES_MODE_FEATURE_FLAG_REGISTERED); + } + + /** + * Is the index in time series mode? Time series mode indices are + * automatically routed and sorted on a the + * {@link TimeSeriesIdFieldMapper _tsid} field. {@code _tsid} itself + * is automatically {@link TimeSeriesIdGenerator generated} using + * the fields marked as "dimensions". + */ + public static final Setting TIME_SERIES_MODE = Setting.boolSetting("index.time_series_mode", false, Property.IndexScope); + private final Index index; private final Version version; private final Logger logger; @@ -396,6 +422,15 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { */ private volatile int maxRegexLength; + /** + * Is the index in time series mode? Time series mode indices are + * automatically routed and sorted on a the + * {@link TimeSeriesIdFieldMapper _tsid} field. {@code _tsid} itself + * is automatically {@link TimeSeriesIdGenerator generated} using + * the fields marked as "dimensions". 
+ */ + private final boolean timeSeriesMode; + /** * Returns the default search fields for this index. */ @@ -497,6 +532,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); this.mergePolicyConfig = new MergePolicyConfig(logger, this); + timeSeriesMode = scopedSettings.get(TIME_SERIES_MODE); this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -1033,4 +1069,8 @@ public long getMappingDimensionFieldsLimit() { private void setMappingDimensionFieldsLimit(long value) { this.mappingDimensionFieldsLimit = value; } + + public boolean inTimeSeriesMode() { + return timeSeriesMode; + } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index 7f4765c1c534e..a529ff28e4d72 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -112,12 +112,31 @@ private static MultiValueMode parseMultiValueMode(String value) { final FieldSortSpec[] sortSpecs; private final Version indexCreatedVersion; private final String indexName; + private final boolean timeSeriesMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); this.indexName = indexSettings.getIndex().getName(); + this.timeSeriesMode = indexSettings.inTimeSeriesMode(); + List fields = INDEX_SORT_FIELD_SETTING.get(settings); + if (timeSeriesMode) { + if (false == fields.isEmpty()) { + throw new IllegalArgumentException("Can't set [" + INDEX_SORT_FIELD_SETTING.getKey() + "] in time series mode"); + } + if (INDEX_SORT_ORDER_SETTING.exists(settings)) 
{ + throw new IllegalArgumentException("Can't set [" + INDEX_SORT_ORDER_SETTING.getKey() + "] in time series mode"); + } + if (INDEX_SORT_MODE_SETTING.exists(settings)) { + throw new IllegalArgumentException("Can't set [" + INDEX_SORT_MODE_SETTING.getKey() + "] in time series mode"); + } + if (INDEX_SORT_MISSING_SETTING.exists(settings)) { + throw new IllegalArgumentException("Can't set [" + INDEX_SORT_MISSING_SETTING.getKey() + "] in time series mode"); + } + this.sortSpecs = new FieldSortSpec[] { new FieldSortSpec("_tsid"), new FieldSortSpec("@timestamp") }; + return; + } this.sortSpecs = fields.stream() .map((name) -> new FieldSortSpec(name)) .toArray(FieldSortSpec[]::new); @@ -184,7 +203,11 @@ public Sort buildIndexSort(Function fieldTypeLookup, FieldSortSpec sortSpec = sortSpecs[i]; final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); if (ft == null) { - throw new IllegalArgumentException("unknown index sort field:[" + sortSpec.field + "]"); + String err = "unknown index sort field:[" + sortSpec.field + "]"; + if (timeSeriesMode) { + err += " required by [" + IndexSettings.TIME_SERIES_MODE.getKey() + "]"; + } + throw new IllegalArgumentException(err); } if (Objects.equals(ft.name(), sortSpec.field) == false) { if (this.indexCreatedVersion.onOrAfter(Version.V_7_13_0)) { diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java new file mode 100644 index 0000000000000..88adb3f3a9c5e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -0,0 +1,329 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index; + +import org.apache.lucene.util.ByteBlockPool; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.apache.lucene.util.compress.LZ4; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.core.CheckedConsumer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Consumer; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Builds the time series id ({@code _tsid}) from a {@code _source}, a + * {@link byte[]} that uniquely identifies the time series to which that + * {@code _source} belongs. Put another way: if you think of the + * {@code _source} as a document describing the state of some real + * world thing then the {@code _tsid} uniquely + * identifies that thing. + *

The Story

+ *

+ * Uniquely identifying a thing is useful if, say, that thing exposes + * an ever increasing counter and you want to calculate its rate of change. + * The ur-example of this sort of thing is routers and the ur-implementation + * of the rate of change is RRDTool. + * More modern examples are things like + * Prometheus. These are wonderful tools + * with good ideas. + *

+ * Those systems born from time series data typically group the metrics by their + * unique time series identifier, sorted by time. Like, that's the on disk + * structure. For RRDTool it is literally a file named for the time series + * containing a couple of circular buffers for measurements. The index into the + * buffer is pretty much {@code (time - start_offset)/resolution}. + *

+ * Elasticsearch, being a search engine at heart, doesn't organize things that + * way. All of our "time based" data, including metric data, is organized in + * indices that roughly correspond to a time range. This is useful because we + * build many write-once files (segments) to power our search engine. Deleting + * documents in an index is expensive so we prefer you delete the entire index + * when it gets too old. Logan's Run for data. + *

+ * Those roughly time ranged indices are further sharded based on a routing key, + * usually a document's randomly generated id. But for time series data we really + * want to group documents corresponding to the same real world thing together. + * So we route on {@code _tsid} instead. Further, we sort each segment on + * {@code _tsid, @timestamp} to put the documents right next to + * each other. This allows even simple compression algorithms like {@link LZ4} + * to do a very good job compressing the {@code _source}. + *

+ * Let's take a quick detour to talk about why this compression is important. We + * tend to store much more data than the {@code _tsid} and the measurement + * in each document. In documents about k8s containers, for example, we'll store + * the container's id (the {@code _tsid}) and the memory usage (a measurement) but + * we'll also store the url of the image used to build the pod, the pod's name, the + * name of the node it's running on, the name of the agent sampling the data, the + * version of the agent sampling the data, etc. All of that "extra" is useful and + * we should get the best compression out of it we can. + *

The Constraints

+ * As much as we'd like to be a wonderful storage system for metrics we really don't + * want to rewrite any of Elasticsearch's traditional assumptions. Elasticsearch + * has always stored the original {@code _source} document, for example, and we + * stick to that proud tradition, working hard to compress it well. + *

+ * Similarly, Elasticsearch has always been ok with documents arriving "out of + * order". By that, I mean that documents that correspond to a measurement taken + * at a certain time can get delayed. And that Elasticsearch doesn't mind those + * delays. It'll accept the document. We'd like to keep this behavior. + *

+ * Elasticsearch has traditionally been quite ok adding new fields to an index + * on the fly. And we'd like to keep that too. Even for dimension fields. So, if + * you add a new dimension to an existing index we have to make sure it doesn't + * get confused and start trying to route documents to the wrong place. + *

+ * The length of the {@code _tsid} doesn't really matter very much. Remember, we're + * grouping like documents together. So they compress well. It costs bytes over + * the wire and in memory so it shouldn't be massive, but it doesn't need to be + * super tiny. + *

+ * When it comes time to use the {@code _tsid} to do fun stuff like detect the rate + * of change of a counter it would screw things up if two "counters" got the same + * {@code _tsid}. + *

+ * It's pretty useful to be able to "parse" the {@code _tsid} into the values + * of the dimensions that it encoded. It's useful to be able to do that without + * any index metadata. + *

The {@code _tsid}

+ * Given these constraints the {@code _tsid} is just the field + * names and their values plus a little type information. + *

Where does this happen?!

+ * We generate the {@code _tsid} on the node coordinating the index action so that + * it can be included in the routing. + */ +public final class TimeSeriesIdGenerator { + /** + * The maximum length of the tsid. The value itself comes from a range check in + * Lucene's writer for utf-8 doc values. + */ + private static final int LIMIT = ByteBlockPool.BYTE_BLOCK_SIZE - 2; + /** + * Maximum length of the name of dimension. We picked this so that we could + * comfortable fit 16 dimensions inside {@link #LIMIT}. + */ + private static final int DIMENSION_NAME_LIMIT = 512; + /** + * The maximum length of any single dimension. We picked this so that we could + * comfortable fit 16 dimensions inside {@link #LIMIT}. This should be quite + * comfortable given that dimensions are typically going to be less than a + * hundred bytes each, but we're being paranoid here. + */ + private static final int DIMENSION_VALUE_LIMIT = 1024; + + private final ObjectComponent root; + + public TimeSeriesIdGenerator(ObjectComponent root) { + if (root == null) { + throw new IllegalArgumentException( + "Index configured with [" + + IndexSettings.TIME_SERIES_MODE.getKey() + + "] requires at least one field configured with [dimension:true]" + ); + } + root.collectDimensionNames("", name -> { + int bytes = UnicodeUtil.calcUTF16toUTF8Length(name, 0, name.length()); + if (bytes > DIMENSION_NAME_LIMIT) { + throw new IllegalArgumentException( + "Dimension name must be less than [" + DIMENSION_NAME_LIMIT + "] bytes but [" + name + "] was [" + bytes + "]" + ); + } + }); + this.root = root; + } + + @Override + public String toString() { + return "extract dimensions using " + root; + } + + /** + * Build the tsid from the {@code _source}. See class docs for more on what it looks like and why. 
+ */ + public BytesReference generate(XContentParser parser) throws IOException { + List>> values = new ArrayList<>(); + parser.nextToken(); + root.extract(values, "", parser); + if (values.isEmpty()) { + List dimensionNames = new ArrayList<>(); + root.collectDimensionNames("", dimensionNames::add); + Collections.sort(dimensionNames); + throw new IllegalArgumentException("Document must contain one of the dimensions " + dimensionNames); + } + Collections.sort(values, Comparator.comparing(Map.Entry::getKey)); + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(values.size()); + for (Map.Entry> v : values) { + out.writeBytesRef(new BytesRef(v.getKey())); // Write in utf-8 instead of writeString's utf-16-ish thing + v.getValue().accept(out); + } + BytesReference bytes = out.bytes(); + if (bytes.length() > LIMIT) { + throw new IllegalArgumentException("tsid longer than [" + LIMIT + "] bytes [" + bytes.length() + "]"); + } + return bytes; + } + } + + /** + * Parse the {@code _tsid} into a human readable map. 
+ */ + public static Map parse(StreamInput in) throws IOException { + int size = in.readVInt(); + Map result = new LinkedHashMap(size); + for (int i = 0; i < size; i++) { + String name = in.readString(); + try { + int type = in.read(); + switch (type) { + case (byte) 's': + result.put(name, in.readBytesRef().utf8ToString()); + break; + case (byte) 'l': + result.put(name, in.readLong()); + break; + default: + throw new IllegalArgumentException("known type [" + type + "]"); + } + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("can't parse [" + name + "]: " + e.getMessage(), e); + } + } + return result; + } + + public abstract static class Component { + private Component() {} + + abstract void extract( + List>> values, + String name, + XContentParser parser + ) throws IOException; + + abstract void collectDimensionNames(String name, Consumer consumer); + } + + public static final class ObjectComponent extends Component { + private final Map components; + + public ObjectComponent(Map components) { + this.components = components; + for (Map.Entry c : components.entrySet()) { + if (c.getValue() == null) { + throw new IllegalStateException("null components not supported but [" + c.getKey() + "] was null"); + } + } + } + + @Override + void extract(List>> values, String name, XContentParser parser) + throws IOException { + ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser); + while (parser.nextToken() != Token.END_OBJECT) { + String fieldName = parser.currentName(); + parser.nextToken(); + Component sub = components.get(fieldName); + if (sub == null) { + parser.skipChildren(); + continue; + } + sub.extract(values, name.isEmpty() ? fieldName : name + "." + fieldName, parser); + } + } + + @Override + void collectDimensionNames(String name, Consumer consumer) { + for (Map.Entry c : components.entrySet()) { + c.getValue().collectDimensionNames(name.isEmpty() ? c.getKey() : name + "." 
+ c.getKey(), consumer); + } + } + + @Override + public String toString() { + return components.toString(); + } + } + + public abstract static class LeafComponent extends Component { + private LeafComponent() {} + + protected abstract CheckedConsumer extractLeaf(XContentParser parser) throws IOException; + + @Override + void extract(List>> values, String name, XContentParser parser) + throws IOException { + if (parser.currentToken() == XContentParser.Token.START_ARRAY || parser.currentToken() == XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Dimensions must be single valued but got [" + parser.currentToken() + "]"); + } + try { + values.add(Map.entry(name, extractLeaf(parser))); + } catch (IllegalArgumentException | IOException e) { + throw new IllegalArgumentException("error extracting dimension [" + name + "]: " + e.getMessage(), e); + } + } + + @Override + void collectDimensionNames(String name, Consumer dimensionNames) { + dimensionNames.accept(name); + } + } + + public abstract static class StringLeaf extends LeafComponent { + protected abstract String extractString(XContentParser parser) throws IOException; + + @Override + protected final CheckedConsumer extractLeaf(XContentParser parser) throws IOException { + String value = extractString(parser); + if (value == null) { + throw new IllegalArgumentException("null values not allowed"); + } + /* + * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish + * so its easier for folks to reason about the space taken up. Mostly + * it'll be smaller too. 
+ */ + BytesRef bytes = new BytesRef(value); + if (bytes.length > DIMENSION_VALUE_LIMIT) { + throw new IllegalArgumentException("longer than [" + DIMENSION_VALUE_LIMIT + "] bytes [" + bytes.length + "]"); + } + return out -> { + out.write((byte) 's'); + out.writeBytesRef(bytes); + }; + } + } + + public abstract static class LongLeaf extends LeafComponent { + protected abstract long extractLong(XContentParser parser) throws IOException; + + @Override + protected final CheckedConsumer extractLeaf(XContentParser parser) throws IOException { + long value = extractLong(parser); + return out -> { + out.write((byte) 'l'); + out.writeLong(value); + }; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 5fe45568b18b2..65995b036d533 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -10,12 +10,14 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; public class DocumentMapper { private final String type; private final CompressedXContent mappingSource; private final MappingLookup mappingLookup; private final DocumentParser documentParser; + private final TimeSeriesIdGenerator timeSeriesIdGenerator; /** * Create a new {@link DocumentMapper} that holds empty mappings. 
@@ -26,14 +28,15 @@ public static DocumentMapper createEmpty(MapperService mapperService) { RootObjectMapper root = new RootObjectMapper.Builder(MapperService.SINGLE_MAPPING_NAME).build(new ContentPath(1)); MetadataFieldMapper[] metadata = mapperService.getMetadataMappers().values().toArray(new MetadataFieldMapper[0]); Mapping mapping = new Mapping(root, metadata, null); - return new DocumentMapper(mapperService.documentParser(), mapping); + return new DocumentMapper(mapperService.documentParser(), mapping, mapperService.getIndexSettings().inTimeSeriesMode()); } - DocumentMapper(DocumentParser documentParser, Mapping mapping) { + DocumentMapper(DocumentParser documentParser, Mapping mapping, boolean inTimeSeriesMode) { this.documentParser = documentParser; this.type = mapping.getRoot().name(); this.mappingLookup = MappingLookup.fromMapping(mapping); this.mappingSource = mapping.toCompressedXContent(); + timeSeriesIdGenerator = inTimeSeriesMode ? mapping.buildTimeSeriesIdGenerator() : null; } public Mapping mapping() { @@ -68,6 +71,10 @@ public IndexFieldMapper IndexFieldMapper() { return metadataMapper(IndexFieldMapper.class); } + public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { + return timeSeriesIdGenerator; + } + public MappingLookup mappers() { return this.mappingLookup; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 608041a4dcd35..bf9dc447365bf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -22,8 +22,12 @@ import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.TimeSeriesIdGenerator; +import org.elasticsearch.index.TimeSeriesIdGenerator.Component; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -443,34 +447,28 @@ protected String contentType() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { - Object addressAsObject = context.parser().textOrNull(); - - if (addressAsObject == null) { - addressAsObject = nullValue; - } - - if (addressAsObject == null) { - return; - } - - String addressAsString = addressAsObject.toString(); InetAddress address; - if (addressAsObject instanceof InetAddress) { - address = (InetAddress) addressAsObject; - } else { - try { - address = InetAddresses.forString(addressAsString); - } catch (IllegalArgumentException e) { - if (ignoreMalformed) { - context.addIgnoredField(fieldType().name()); - return; - } else { - throw e; - } + try { + address = value(context.parser(), nullValue); + } catch (IllegalArgumentException e) { + if (ignoreMalformed) { + context.addIgnoredField(fieldType().name()); + return; + } else { + throw e; } } + if (address != null) { + indexValue(context, address); + } + } - indexValue(context, address); + private static InetAddress value(XContentParser parser, InetAddress nullValue) throws IOException { + String value = parser.textOrNull(); + if (value == null) { + return nullValue; + } + return InetAddresses.forString(value); } private void indexValue(DocumentParserContext context, InetAddress address) { @@ -508,4 +506,38 @@ public FieldMapper.Builder getMergeBuilder() { return new Builder(simpleName(), scriptCompiler, ignoreMalformedByDefault, indexCreatedVersion).dimension(dimension).init(this); } + @Override + protected Component selectTimeSeriesIdComponents() { + if (false == 
dimension) { + return null; + } + return timeSeriesIdGenerator(nullValue); + } + + public static TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(InetAddress nullValue) { + if (nullValue == null) { + return IpTsidGen.DEFAULT; + } + return new IpTsidGen(nullValue); + } + private static class IpTsidGen extends TimeSeriesIdGenerator.StringLeaf { + private static final IpTsidGen DEFAULT = new IpTsidGen(null); + + private final InetAddress nullValue; + + IpTsidGen(InetAddress nullValue) { + this.nullValue = nullValue; + } + + @Override + protected String extractString(XContentParser parser) throws IOException { + InetAddress value = value(parser, nullValue); + return value == null ? null : NetworkAddress.format(value); + } + + @Override + public String toString() { + return "ip[" + nullValue + "]"; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 1a3db9d4f96d5..1ac661f271a56 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.AutomatonQueries; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -69,6 +70,8 @@ public static class Defaults { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); FIELD_TYPE.freeze(); } + + public static final int IGNORE_ABOVE = Integer.MAX_VALUE; } public static class KeywordField extends Field { @@ -95,7 +98,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter eagerGlobalOrdinals = 
Parameter.boolParam("eager_global_ordinals", true, m -> toType(m).eagerGlobalOrdinals, false); private final Parameter ignoreAbove - = Parameter.intParam("ignore_above", true, m -> toType(m).ignoreAbove, Integer.MAX_VALUE); + = Parameter.intParam("ignore_above", true, m -> toType(m).ignoreAbove, Defaults.IGNORE_ABOVE); private final Parameter indexOptions = Parameter.restrictedStringParam("index_options", false, m -> toType(m).indexOptions, "docs", "freqs"); @@ -481,15 +484,14 @@ public KeywordFieldType fieldType() { @Override protected void parseCreateField(DocumentParserContext context) throws IOException { - String value; - XContentParser parser = context.parser(); + indexValue(context, value(context.parser(), nullValue)); + } + + private static String value(XContentParser parser, String nullValue) throws IOException { if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { - value = nullValue; - } else { - value = parser.textOrNull(); + return nullValue; } - - indexValue(context, value); + return parser.textOrNull(); } @Override @@ -578,4 +580,43 @@ public FieldMapper.Builder getMergeBuilder() { return new Builder(simpleName(), indexAnalyzers, scriptCompiler).dimension(dimension).init(this); } + @Override + protected TimeSeriesIdGenerator.Component selectTimeSeriesIdComponents() { + if (false == dimension) { + return null; + } + if (ignoreAbove != Defaults.IGNORE_ABOVE) { + throw new IllegalArgumentException("[ignore_above] not supported by dimensions"); + } + if (normalizerName != null) { + throw new IllegalArgumentException("[normalizer] not supported by dimensions"); + } + return new KeywordTsidGen(nullValue); + } + + public static TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(String nullValue) { + if (nullValue == null) { + return KeywordTsidGen.DEFAULT; + } + return new KeywordTsidGen(nullValue); + } + private static class KeywordTsidGen extends TimeSeriesIdGenerator.StringLeaf { + private static final KeywordTsidGen DEFAULT = new 
KeywordTsidGen(null); + + private final String nullValue; + + KeywordTsidGen(String nullValue) { + this.nullValue = nullValue; + } + + @Override + protected String extractString(XContentParser parser) throws IOException { + return value(parser, nullValue); + } + + @Override + public String toString() { + return "kwd[" + nullValue + "]"; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 2afe7f78cd4ac..0e95bbbf74719 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.index.TimeSeriesIdGenerator; import java.util.Map; import java.util.Objects; @@ -66,4 +67,7 @@ public final String simpleName() { */ public abstract void validate(MappingLookup mappers); + protected TimeSeriesIdGenerator.Component selectTimeSeriesIdComponents() { + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index d18588c09a38e..0896ddfe6a2e0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -284,7 +284,7 @@ private synchronized DocumentMapper mergeAndApplyMappings(String mappingType, Co } private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason) { - DocumentMapper newMapper = new DocumentMapper(documentParser, mapping); + DocumentMapper newMapper = new DocumentMapper(documentParser, mapping, indexSettings.inTimeSeriesMode()); newMapper.mapping().getRoot().fixRedundantIncludes(); newMapper.validate(indexSettings, reason != MergeReason.MAPPING_RECOVERY); return newMapper; diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 4e1c0037e9148..e8b4387f36c9f 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.mapper.MapperService.MergeReason; import java.io.IOException; @@ -188,4 +189,8 @@ public String toString() { throw new UncheckedIOException(bogus); } } + + TimeSeriesIdGenerator buildTimeSeriesIdGenerator() { + return new TimeSeriesIdGenerator(root.selectTimeSeriesIdComponents()); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index e66f829c9cfc9..b20906a90e5a7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -269,6 +270,11 @@ public List createFields(String name, Number value, return fields; } + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + throw new IllegalArgumentException("[half_float] is not a valid dimension"); + } + private 
void validateParsed(float value) { if (Float.isFinite(HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(value))) == false) { throw new IllegalArgumentException("[half_float] supports only finite values, but got [" + value + "]"); @@ -365,6 +371,11 @@ public List createFields(String name, Number value, return fields; } + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + throw new IllegalArgumentException("[float] is not a valid dimension"); + } + private void validateParsed(float value) { if (Float.isFinite(value) == false) { throw new IllegalArgumentException("[float] supports only finite values, but got [" + value + "]"); @@ -445,6 +456,11 @@ public List createFields(String name, Number value, return fields; } + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + throw new IllegalArgumentException("[double] is not a valid dimension"); + } + private void validateParsed(double value) { if (Double.isFinite(value) == false) { throw new IllegalArgumentException("[double] supports only finite values, but got [" + value + "]"); @@ -511,6 +527,14 @@ public List createFields(String name, Number value, Number valueForSearch(Number value) { return value.byteValue(); } + + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + if (nullValue == null && coerce) { + return WholeNumberTsidGen.BYTE; + } + return new WholeNumberTsidGen(BYTE, nullValue, coerce); + } }, SHORT("short", NumericType.SHORT) { @Override @@ -568,6 +592,14 @@ public List createFields(String name, Number value, Number valueForSearch(Number value) { return value.shortValue(); } + + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + if (nullValue == null && coerce) { + return WholeNumberTsidGen.SHORT; + } + return new 
WholeNumberTsidGen(SHORT, nullValue, coerce); + } }, INTEGER("integer", NumericType.INT) { @Override @@ -686,6 +718,14 @@ public List createFields(String name, Number value, } return fields; } + + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + if (nullValue == null && coerce) { + return WholeNumberTsidGen.INTEGER; + } + return new WholeNumberTsidGen(INTEGER, nullValue, coerce); + } }, LONG("long", NumericType.LONG) { @Override @@ -773,6 +813,14 @@ public List createFields(String name, Number value, } return fields; } + + @Override + public TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce) { + if (nullValue == null && coerce) { + return WholeNumberTsidGen.LONG; + } + return new WholeNumberTsidGen(LONG, nullValue, coerce); + } }; private final String name; @@ -806,6 +854,7 @@ public abstract Query rangeQuery(String field, Object lowerTerm, Object upperTer public abstract Number parsePoint(byte[] value); public abstract List createFields(String name, Number value, boolean indexed, boolean docValued, boolean stored); + public abstract TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(Number nullValue, boolean coerce); public FieldValues compile(String fieldName, Script script, ScriptCompiler compiler) { // only implemented for long and double fields @@ -1145,7 +1194,7 @@ protected String contentType() { protected void parseCreateField(DocumentParserContext context) throws IOException { Number value; try { - value = value(context.parser(), type, nullValue, coerce.value()); + value = value(context.parser(), type, nullValue, coerce()); } catch (InputCoercionException | IllegalArgumentException | JsonParseException e) { if (ignoreMalformed.value() && context.parser().currentToken().isValue()) { context.addIgnoredField(mappedFieldType.name()); @@ -1210,4 +1259,46 @@ public FieldMapper.Builder getMergeBuilder() { return new Builder(simpleName(), type, 
builder.scriptCompiler, ignoreMalformedByDefault, coerceByDefault) .dimension(dimension).init(this); } + + @Override + protected TimeSeriesIdGenerator.LeafComponent selectTimeSeriesIdComponents() { + if (false == dimension) { + return null; + } + if (ignoreMalformed.value()) { + throw new IllegalArgumentException("Dimensions can not ignore_malformed"); + } + return type.timeSeriesIdGenerator(nullValue, coerce.value()); + } + + private static class WholeNumberTsidGen extends TimeSeriesIdGenerator.LongLeaf { + private static final WholeNumberTsidGen BYTE = new WholeNumberTsidGen(NumberType.BYTE, null, true); + private static final WholeNumberTsidGen SHORT = new WholeNumberTsidGen(NumberType.SHORT, null, true); + private static final WholeNumberTsidGen INTEGER = new WholeNumberTsidGen(NumberType.INTEGER, null, true); + private static final WholeNumberTsidGen LONG = new WholeNumberTsidGen(NumberType.LONG, null, true); + + private final NumberType numberType; + private final Number nullValue; + private final boolean coerce; + + WholeNumberTsidGen(NumberType numberType, Number nullValue, boolean coerce) { + this.numberType = numberType; + this.nullValue = nullValue; + this.coerce = coerce; + } + + @Override + protected long extractLong(XContentParser parser) throws IOException { + Number value = value(parser, numberType, nullValue, coerce); + if (value == null) { + throw new IllegalArgumentException("null values not allowed"); + } + return value.longValue(); + } + + @Override + public String toString() { + return numberType + "[" + nullValue + "," + coerce + "]"; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index 50aa6322634c1..7daaadaa29632 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -16,6 +16,7 @@ import 
org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.mapper.MapperService.MergeReason; import java.io.IOException; @@ -430,4 +431,20 @@ protected void serializeMappers(XContentBuilder builder, Params params) throws I protected void doXContent(XContentBuilder builder, Params params) throws IOException { } + + @Override + protected TimeSeriesIdGenerator.ObjectComponent selectTimeSeriesIdComponents() { + Map components = null; + for (Mapper mapper : this) { + TimeSeriesIdGenerator.Component sub = mapper.selectTimeSeriesIdComponents(); + if (sub == null) { + continue; + } + if (components == null) { + components = new HashMap<>(); + } + components.put(mapper.simpleName(), sub); + } + return components == null ? null : new TimeSeriesIdGenerator.ObjectComponent(components); + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index 905861db18469..bab89856cfc14 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -102,6 +102,11 @@ public boolean required() { @Override public void preParse(DocumentParserContext context) { String routing = context.sourceToParse().routing(); + if (context.indexSettings().inTimeSeriesMode()) { + // TODO when we stop storing the tsid in the routing fail any request with routing in time series mode + // the routing will always come from the time series id. 
+ return; + } if (routing != null) { context.doc().add(new Field(fieldType().name(), routing, Defaults.FIELD_TYPE)); context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java new file mode 100644 index 0000000000000..028e017455029 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +public class TimeSeriesIdFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_tsid"; + public static final String CONTENT_TYPE = "_tsid"; + public static final TimeSeriesIdFieldType FIELD_TYPE = new TimeSeriesIdFieldType(); + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new 
Builder().init(this); + } + + public static class Builder extends MetadataFieldMapper.Builder { + protected Builder() { + super(NAME); + } + + @Override + protected List> getParameters() { + return List.of(); + } + + @Override + public TimeSeriesIdFieldMapper build() { + return new TimeSeriesIdFieldMapper(); + } + } + + public static final TypeParser PARSER = new ConfigurableTypeParser( + c -> new TimeSeriesIdFieldMapper(), + c -> new Builder() + ); + + public static final class TimeSeriesIdFieldType extends MappedFieldType { + private TimeSeriesIdFieldType() { + super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this)); + } + + @Override + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return DocValueFormat.TIME_SERIES_ID; + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { + failIfNoDocValues(); + // TODO don't leak the TSID's binary format into the script + return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.KEYWORD); + } + + @Override + public Query termQuery(Object value, SearchExecutionContext context) { + throw new IllegalArgumentException("[" + NAME + "] is not searchable"); + } + } + + private TimeSeriesIdFieldMapper() { + super(FIELD_TYPE, Lucene.KEYWORD_ANALYZER); + } + + @Override + public void preParse(DocumentParserContext context) throws IOException { + if (false == context.indexSettings().inTimeSeriesMode()) { + return; + } + assert fieldType().isSearchable() == false; + + String routing = 
context.sourceToParse().routing(); + if (routing == null) { + throw new IllegalArgumentException("In time series mode the tsid needs to be in the routing"); + } + // TODO switch to native BytesRef over the wire, leaving the routing alone + BytesRef value = new BytesRef(Base64.getDecoder().decode(routing)); + context.doc().add(new SortedSetDocValuesField(fieldType().name(), value)); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java index 2570d3b30afa6..3778f79a0aee9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/CoordinatorRewriteContext.java @@ -58,6 +58,7 @@ boolean hasTimestampData() { @Nullable public MappedFieldType getFieldType(String fieldName) { + // TODO use time series id generation?
if (fieldName.equals(timestampFieldType.name()) == false) { return null; } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index e5bffcd4f8df8..c88696961fa62 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -14,23 +14,30 @@ import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.resync.TransportResyncReplicationAction; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.BooleanFieldMapper; +import org.elasticsearch.index.mapper.BooleanScriptFieldType; import org.elasticsearch.index.mapper.CompletionFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.DateScriptFieldType; import org.elasticsearch.index.mapper.DocCountFieldMapper; +import org.elasticsearch.index.mapper.DoubleScriptFieldType; import org.elasticsearch.index.mapper.FieldAliasMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.GeoPointScriptFieldType; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; +import org.elasticsearch.index.mapper.IpScriptFieldType; import 
org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.KeywordScriptFieldType; +import org.elasticsearch.index.mapper.LongScriptFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -44,6 +51,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; @@ -53,13 +61,6 @@ import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.index.mapper.BooleanScriptFieldType; -import org.elasticsearch.index.mapper.DateScriptFieldType; -import org.elasticsearch.index.mapper.DoubleScriptFieldType; -import org.elasticsearch.index.mapper.GeoPointScriptFieldType; -import org.elasticsearch.index.mapper.IpScriptFieldType; -import org.elasticsearch.index.mapper.KeywordScriptFieldType; -import org.elasticsearch.index.mapper.LongScriptFieldType; import java.util.Arrays; import java.util.Collections; @@ -175,6 +176,7 @@ private static Map initBuiltInMetadataMa // (so will benefit from "fields: []" early termination builtInMetadataMappers.put(IdFieldMapper.NAME, IdFieldMapper.PARSER); builtInMetadataMappers.put(RoutingFieldMapper.NAME, RoutingFieldMapper.PARSER); + builtInMetadataMappers.put(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.PARSER); builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); 
builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index bd602415568af..3c8fb5a5487d4 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -80,6 +80,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.bulk.stats.BulkStats; import org.elasticsearch.index.cache.request.ShardRequestCache; @@ -233,15 +234,7 @@ public class IndicesService extends AbstractLifecycleComponent private final ValuesSourceRegistry valuesSourceRegistry; private final TimestampFieldMapperService timestampFieldMapperService; private final CheckedBiConsumer requestCacheKeyDifferentiator; - - @Override - protected void doStart() { - // Start thread that will manage cleaning the field data cache periodically - threadPool.schedule(this.cacheCleaner, this.cleanInterval, ThreadPool.Names.SAME); - - // Start watching for timestamp fields - clusterService.addStateApplier(timestampFieldMapperService); - } + private final TimeSeriesIdGeneratorService timeSeriesIdGeneratorService; public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv, NamedXContentRegistry xContentRegistry, AnalysisRegistry analysisRegistry, IndexNameExpressionResolver indexNameExpressionResolver, @@ -340,6 +333,17 @@ protected void closeInternal() { clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); this.timestampFieldMapperService = new TimestampFieldMapperService(settings, threadPool, this); + 
this.timeSeriesIdGeneratorService = TimeSeriesIdGeneratorService.build(settings, threadPool, this); + } + + @Override + protected void doStart() { + // Start thread that will manage cleaning the field data cache periodically + threadPool.schedule(this.cacheCleaner, this.cleanInterval, ThreadPool.Names.SAME); + + // Start watching for mapping changes + clusterService.addStateApplier(timestampFieldMapperService); + clusterService.addStateApplier(timeSeriesIdGeneratorService); } private static final String DANGLING_INDICES_UPDATE_THREAD_NAME = "DanglingIndices#updateTask"; @@ -352,6 +356,8 @@ public ClusterService clusterService() { protected void doStop() { clusterService.removeApplier(timestampFieldMapperService); timestampFieldMapperService.doStop(); + clusterService.removeApplier(timeSeriesIdGeneratorService); + timeSeriesIdGeneratorService.doStop(); ThreadPool.terminate(danglingIndicesThreadPoolExecutor, 10, TimeUnit.SECONDS); @@ -513,7 +519,10 @@ public boolean hasIndex(Index index) { } /** - * Returns an IndexService for the specified index if exists otherwise returns null. + * Returns an IndexService for the specified index if exists + * locally otherwise returns null. + * If the index exists in the cluster state but not locally this will + * return null. */ @Override @Nullable @@ -1702,4 +1711,7 @@ public DateFieldMapper.DateFieldType getTimestampFieldType(Index index) { return timestampFieldMapperService.getTimestampFieldType(index); } + public Function getTimeSeriesGeneratorLookup() { + return timeSeriesIdGeneratorService; + } } diff --git a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java new file mode 100644 index 0000000000000..3a9aa9e1673d8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java @@ -0,0 +1,386 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices; + +import com.carrotsearch.hppc.cursors.ObjectCursor; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateApplier; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.LazyInitializable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.TimeSeriesIdGenerator; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.node.Node; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +/** + * Looks up the {@link TimeSeriesIdGenerator} for an index. 
If the index is + * local we read from the local metadata. If the index isn't local we parse + * the mapping, read it, and cache it. + */ +class TimeSeriesIdGeneratorService extends AbstractLifecycleComponent + implements + ClusterStateApplier, + Function { + private static final Logger logger = LogManager.getLogger(TimeSeriesIdGeneratorService.class); + + public interface LocalIndex { + long metadataVersion(); + + TimeSeriesIdGenerator generator(); + } + + private final Function lookupLocalIndex; + private final Function buildTimeSeriedIdGenerator; + private final ExecutorService executor; // single thread to construct mapper services async as needed + private final Map byIndex = ConcurrentCollections.newConcurrentMap(); + + static TimeSeriesIdGeneratorService build(Settings nodeSettings, ThreadPool threadPool, IndicesService indicesService) { + String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(nodeSettings)); + String threadName = String.format(Locale.ROOT, "%s/%s#updateTask", nodeName, TimeSeriesIdGeneratorService.class.getSimpleName()); + ExecutorService executor = EsExecutors.newScaling( + threadName, + 0, + 1, + 0, + TimeUnit.MILLISECONDS, + daemonThreadFactory(nodeName, threadName), + threadPool.getThreadContext() + ); + + Function lookupLocalIndex = index -> { + IndexService local = indicesService.indexService(index); + return local == null ? 
null : new LocalIndex() { + @Override + public long metadataVersion() { + return local.getMetadata().getVersion(); + } + + @Override + public TimeSeriesIdGenerator generator() { + return local.mapperService().documentMapper().getTimeSeriesIdGenerator(); + } + }; + }; + + Function buildTimeSeriedIdGenerator = indexMetadata -> { + ClusterApplierService.assertNotClusterStateUpdateThread("decompressed the mapping of many indices"); + try { + try (MapperService tmp = indicesService.createIndexMapperService(indexMetadata)) { + tmp.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY); + TimeSeriesIdGenerator gen = tmp.documentMapper().getTimeSeriesIdGenerator(); + logger.trace("computed timeseries id generator for {}", indexMetadata.getIndex()); + return gen; + } + } catch (IOException e) { + // Whatever happened here is unrecoverable and likely a bug so IllegalStateException which'll turn into HTTP 500 + throw new IllegalStateException("error building time series id generator: " + e.getMessage(), e); + } + }; + + return new TimeSeriesIdGeneratorService(executor, lookupLocalIndex, buildTimeSeriedIdGenerator); + } + + TimeSeriesIdGeneratorService( + ExecutorService executor, + Function lookupLocalIndex, + Function buildTimeSeriedIdGenerator + ) { + this.executor = executor; + this.lookupLocalIndex = lookupLocalIndex; + this.buildTimeSeriedIdGenerator = buildTimeSeriedIdGenerator; + } + + @Override + protected void doStart() {} + + @Override + protected void doStop() { + ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + } + + @Override + protected void doClose() {} + + @Override + public TimeSeriesIdGenerator apply(IndexMetadata meta) { + if (false == meta.inTimeSeriesMode()) { + return null; + } + Value v = byIndex.get(meta.getIndex()); + /* + * v is rebuilt in applyClusterState which should have happened-before + * whatever made meta available to the rest of the system. So the if + * statement below really shouldn't fail. 
+ */ + if (meta.getMappingVersion() > v.mappingVersion) { + throw new IllegalStateException( + "Got a newer version of the index than the time series id generator [" + + meta.getMappingVersion() + + "] vs [" + + v.mappingVersion + + "]" + ); + } + /* + * Because TimeSeriesIdGenerators only "get bigger" it should be safe + * to use whatever is in the map, even if it is for a newer version of + * the index. + */ + return v.generator(); + } + + @Override + public void applyClusterState(ClusterChangedEvent event) { + applyClusterState(event.state().metadata()); + } + + void applyClusterState(Metadata metadata) { + /* + * Update the "byIndex" map containing the generators in three phases: + * 1. Remove any deleted indices. + * 2. Update any indices hosted on this node or whose mapping hasn't + * changed. + * 3. Update remaining indices. These are slower but we can reuse any + * generators built for indices with the same mapping. + */ + byIndex.keySet().removeIf(index -> metadata.index(index) == null); + + Map dedupe = new HashMap<>(); + + for (ObjectCursor cursor : metadata.indices().values()) { + IndexMetadata indexMetadata = cursor.value; + if (false == indexMetadata.inTimeSeriesMode()) { + continue; + } + Index index = indexMetadata.getIndex(); + DedupeKey key = new DedupeKey(indexMetadata); + + /* + * Find indices whose mapping hasn't changed. + */ + Value old = byIndex.get(index); + if (old != null && old.mappingVersion == indexMetadata.getMappingVersion()) { + logger.trace("reusing previous timeseries id generator for {}", index); + dedupe.put(key, old); + continue; + } + + /* + * Check if the mapping is the same as something we've already seen. + */ + Value value = dedupe.get(key); + if (value != null) { + logger.trace("reusing timeseries id from another index for {}", index); + byIndex.put(index, value.withMappingVersion(indexMetadata.getMappingVersion())); + continue; + } + + /* + * Find indices that we're hosting locally.
In production this + * looks up against IndicesService which is a "high priority" + * update consumer so its cluster state updates + * "happen-before" this one. + */ + LocalIndex localIndex = lookupLocalIndex.apply(index); + if (localIndex == null) { + logger.trace("timeseries id for {} is not available locally", index); + continue; + } + logger.trace("computing timeseries id generator for {} using local index service", index); + if (localIndex.metadataVersion() < indexMetadata.getVersion()) { + throw new IllegalStateException( + "Trying to update timeseries id with an older version of the metadata [" + + localIndex.metadataVersion() + + "] vs [" + + indexMetadata.getVersion() + + "]" + ); + } + value = new PreBuiltValue(indexMetadata.getMappingVersion(), localIndex.generator()); + byIndex.put(index, value); + dedupe.put(key, value); + } + + /* + * Update the remaining indices. + */ + for (ObjectCursor cursor : metadata.indices().values()) { + IndexMetadata indexMetadata = cursor.value; + if (false == indexMetadata.inTimeSeriesMode()) { + continue; + } + Index index = indexMetadata.getIndex(); + + Value old = byIndex.get(index); + if (old != null && old.mappingVersion == indexMetadata.getMappingVersion()) { + // We already updated the generator in the first pass + continue; + } + + DedupeKey key = new DedupeKey(indexMetadata); + Value value = dedupe.get(key); + if (value == null) { + logger.trace("computing timeseries id generator for {} async", index); + value = new AsyncValue(indexMetadata.getMappingVersion(), buildTimeSeriedIdGenerator, executor, indexMetadata); + } else { + logger.trace("reusing timeseries id from another index for {}", index); + value = value.withMappingVersion(indexMetadata.getMappingVersion()); + } + byIndex.put(index, value); + } + } + + private abstract static class Value { + private final long mappingVersion; + + protected Value(long mappingVersion) { + this.mappingVersion = mappingVersion; + } + + abstract TimeSeriesIdGenerator
generator(); + + abstract Value withMappingVersion(long newMappingVersion); + } + + private static class PreBuiltValue extends Value { + private final TimeSeriesIdGenerator generator; + + PreBuiltValue(long mappingVersion, TimeSeriesIdGenerator generator) { + super(mappingVersion); + this.generator = generator; + } + + @Override + TimeSeriesIdGenerator generator() { + return generator; + } + + @Override + Value withMappingVersion(long newMappingVersion) { + return new PreBuiltValue(newMappingVersion, generator); + } + } + + /** + * Build the {@link TimeSeriesIdGenerator} async from the cluster state + * update thread. Creating this will queue a task to build the generator + * on the separate thread but return immediately. Callers to + * {@link #generator()} race that queued task. If they win they will + * build the {@link TimeSeriesIdGenerator} and if they lose they'll return + * a cached copy. + */ + private static class AsyncValue extends Value { + private final LazyInitializable lazy; + + private AsyncValue(long mappingVersion, LazyInitializable lazy) { + super(mappingVersion); + this.lazy = lazy; + } + + AsyncValue( + long mappingVersion, + Function buildTimeSeriesIdGenerator, + ExecutorService executor, + IndexMetadata indexMetadata + ) { + /* + * This closes over indexMetadata and keeps a reference to it + * for as long as the AsyncValue lives which is ok. It isn't the + * only thing with such a reference. + */ + this(mappingVersion, new LazyInitializable<>(() -> buildTimeSeriesIdGenerator.apply(indexMetadata))); + executor.execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + generator(); + } + + @Override + public void onFailure(Exception e) { + /* + * We failed to build the time series id generator which is sad, + * but we don't have to make a ton of noise about it because + * when someone goes to use it they'll attempt to build it + * again.
If *they* fail then it'll throw an exception to + * the caller which'll get reported back over http. + */ + logger.debug( + new ParameterizedMessage("error building timeseries id generator for {} async", indexMetadata.getIndex()), + e + ); + } + }); + } + + @Override + TimeSeriesIdGenerator generator() { + return lazy.getOrCompute(); + } + + @Override + Value withMappingVersion(long newMappingVersion) { + return new AsyncValue(newMappingVersion, lazy); + } + } + + /** + * Key for deduplicating mappings. In an ideal world we'd just use the + * mapping's {@link CompressedXContent} but {@link CompressedXContent#equals(Object)} + * will try to decompress the mapping if the crc matches but the compressed bytes + * don't. That's wasteful for us - probably for everyone. If the crc and compressed + * bytes match that's a match. + */ + private static class DedupeKey { // TODO Just use CompressedXContent and remove unzipping + private final CompressedXContent mapping; + + DedupeKey(IndexMetadata meta) { + this.mapping = meta.mapping().source(); + } + + @Override + public int hashCode() { + return mapping.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + DedupeKey other = (DedupeKey) obj; + return mapping.hashCode() == other.mapping.hashCode() && Arrays.equals(mapping.compressed(), other.mapping.compressed()); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 0a72baa232c07..d24feea7f4128 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -19,6 +20,7 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.geometry.utils.Geohash; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; @@ -655,4 +657,35 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) { return Double.parseDouble(value); } }; + + DocValueFormat TIME_SERIES_ID = new TimeSeriesIdDocValueFormat(); + + /** + * DocValues format for time series id. + */ + class TimeSeriesIdDocValueFormat implements DocValueFormat { + private TimeSeriesIdDocValueFormat() {} + + @Override + public String getWriteableName() { + return "tsid"; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public String toString() { + return "tsid"; + } + + @Override + public Object format(BytesRef value) { + try { + return TimeSeriesIdGenerator.parse(new BytesArray(value).streamInput()); + } catch (IOException e) { + throw new IllegalArgumentException("error formatting tsid: " + e.getMessage(), e); + } + } + }; } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 4f35bf10c94bb..e4419693b18d7 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -710,6 +710,7 @@ private void registerValueFormats() { registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW); registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY); registerValueFormat(DocValueFormat.UNSIGNED_LONG_SHIFTED.getWriteableName(), in -> 
DocValueFormat.UNSIGNED_LONG_SHIFTED); + registerValueFormat(DocValueFormat.TIME_SERIES_ID.getWriteableName(), in -> DocValueFormat.TIME_SERIES_ID); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index e7fc0cead4c74..2dba1c571cd95 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -78,6 +78,9 @@ public int compareKey(Bucket other) { @Override protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + if (format == DocValueFormat.TIME_SERIES_ID) { + return builder.field(CommonFields.KEY.getPreferredName(), format.format(termBytes)); + } return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString()); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 9067e2a1e02ba..51456f95e1906 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -22,10 +22,10 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import 
org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; @@ -116,7 +116,7 @@ public boolean hasIndexAbstraction(String indexAbstraction, ClusterState state) TransportBulkAction action = new TransportBulkAction(threadPool, mock(TransportService.class), clusterService, null, null, mock(ActionFilters.class), indexNameExpressionResolver, - new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE) { + new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, meta -> null, System::currentTimeMillis) { @Override void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos, ActionListener listener, AtomicArray responses, Map indicesThatCannotBeCreated) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index 20af23dd26cf8..bc415f3419213 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -31,12 +31,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; @@ -132,7 +132,9 @@ class TestTransportBulkAction extends TransportBulkAction { null, new ActionFilters(Collections.emptySet()), 
TestIndexNameExpressionResolver.newInstance(), new IndexingPressure(SETTINGS), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + meta -> null, + System::currentTimeMillis ); } @@ -163,6 +165,7 @@ public void setupAction() { // initialize captors, which must be members to use @Capture because of generics threadPool = mock(ThreadPool.class); when(threadPool.executor(anyString())).thenReturn(EsExecutors.DIRECT_EXECUTOR_SERVICE); + MockitoAnnotations.initMocks(this); // setup services that will be called by action transportService = mock(TransportService.class); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index f51f3c3f1acc6..f3a29ddbcbee8 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -30,9 +30,10 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.VersionType; import org.elasticsearch.indices.EmptySystemIndices; @@ -61,7 +62,6 @@ import static org.hamcrest.Matchers.is; public class TransportBulkActionTests extends ESTestCase { - /** Services needed by bulk action */ private TransportService transportService; private ClusterService clusterService; @@ -75,9 +75,19 @@ class TestTransportBulkAction extends TransportBulkAction { boolean indexCreated = false; // set when the "real" index is created TestTransportBulkAction() { - 
super(TransportBulkActionTests.this.threadPool, transportService, clusterService, null, - null, new ActionFilters(Collections.emptySet()), new Resolver(), - new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE); + super( + TransportBulkActionTests.this.threadPool, + transportService, + clusterService, + null, + null, + new ActionFilters(Collections.emptySet()), + new Resolver(), + new IndexingPressure(Settings.EMPTY), + EmptySystemIndices.INSTANCE, + meta -> null, + System::nanoTime + ); } @Override @@ -237,6 +247,39 @@ public void testProhibitCustomRoutingOnDataStream() throws Exception { prohibitCustomRoutingOnDataStream(writeRequestAgainstIndex, metadata); } + public void testProhibitedInTimeSeriesModeWithoutATarget() throws Exception { + // Doesn't throw + TransportBulkAction.prohibitInTimeSeriesMode(prohibitedInTimeSeriesMode(), null); + } + + public void testProhibitedInTimeSeriesModeNotInTimeSeriesMode() throws Exception { + Settings settings = Settings.builder().put("index.version.created", Version.CURRENT).build(); + IndexMetadata writeIndex = IndexMetadata.builder("idx").settings(settings).numberOfReplicas(0).numberOfShards(1).build(); + // Doesn't throw + TransportBulkAction.prohibitInTimeSeriesMode(prohibitedInTimeSeriesMode(), new IndexAbstraction.Index(writeIndex)); + } + + public void testProhibitedInTimeSeriesMode() throws Exception { + Settings settings = Settings.builder() + .put("index.version.created", Version.CURRENT) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), true) + .build(); + IndexMetadata writeIndex = IndexMetadata.builder("idx").settings(settings).numberOfReplicas(0).numberOfShards(1).build(); + DocWriteRequest prohibited = prohibitedInTimeSeriesMode(); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> TransportBulkAction.prohibitInTimeSeriesMode(prohibited, new IndexAbstraction.Index(writeIndex)) + ); + assertThat( + e.getMessage(), + equalTo("[" + prohibited.opType() + "] is not supported because 
the destination index [idx] is in time series mode") + ); + } + + private DocWriteRequest prohibitedInTimeSeriesMode() { + return randomBoolean() ? new UpdateRequest("idx", "0") : new DeleteRequest("idx").id("0"); + } + public void testOnlySystem() { SortedMap indicesLookup = new TreeMap<>(); Settings settings = Settings.builder().put("index.version.created", Version.CURRENT).build(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java index 7ab1fa75fe587..6bc0802d6cef7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTookTests.java @@ -226,6 +226,7 @@ static class TestTransportBulkAction extends TransportBulkAction { indexNameExpressionResolver, new IndexingPressure(Settings.EMPTY), EmptySystemIndices.INSTANCE, + meta -> null, relativeTimeProvider); } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index cc9582be7efb6..1cfe961e83df2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -9,13 +9,16 @@ package org.elasticsearch.index; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; +import 
org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -26,10 +29,13 @@ import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; +import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -135,13 +141,8 @@ public void testInvalidMissing() { " must be one of [_last, _first]")); } - public void testIndexSorting() { + public void testIndexSortingNoDocValues() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService, null); MappedFieldType fieldType = new MappedFieldType("field", false, false, false, TextSearchInfo.NONE, Collections.emptyMap()) { @Override public String typeName() { @@ -164,13 +165,7 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new UnsupportedOperationException(); } }; - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> config.buildIndexSort( - field -> fieldType, - (ft, searchLookupSupplier) -> indexFieldDataService.getForField(ft, "index", searchLookupSupplier) - ) - ); + 
Exception iae = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, fieldType)); assertEquals("docvalues not found for index sort field:[field]", iae.getMessage()); assertThat(iae.getCause(), instanceOf(UnsupportedOperationException.class)); assertEquals("index sorting not supported on runtime field [field]", iae.getCause().getMessage()); @@ -178,16 +173,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { public void testSortingAgainstAliases() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService, null); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - Exception e = expectThrows(IllegalArgumentException.class, () -> config.buildIndexSort( - field -> mft, - (ft, s) -> indexFieldDataService.getForField(ft, "index", s) - )); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, Map.of("field", aliased))); assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } @@ -195,17 +182,42 @@ public void testSortingAgainstAliasesPre713() { IndexSettings indexSettings = indexSettings(Settings.builder() .put("index.version.created", Version.V_7_12_0) .put("index.sort.field", "field").build()); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); + assertThat(sort.getSort(), arrayWithSize(1)); + 
assertThat(sort.getSort()[0].getField(), equalTo("aliased")); + assertWarnings("Index sort for index [test] defined on field [field] which resolves to field [aliased]. " + + "You will not be able to define an index sort over aliased fields in new indexes"); + } + + public void testTimeSeriesMode() { + IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build()); + Sort sort = buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE, new DateFieldMapper.DateFieldType("@timestamp")); + assertThat(sort.getSort(), arrayWithSize(2)); + assertThat(sort.getSort()[0].getField(), equalTo("_tsid")); + assertThat(sort.getSort()[1].getField(), equalTo("@timestamp")); + } + + public void testTimeSeriesModeNoTimestamp() { + IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build()); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE)); + assertThat(e.getMessage(), equalTo("unknown index sort field:[@timestamp] required by [index.time_series_mode]")); + } + + private Sort buildIndexSort(IndexSettings indexSettings, MappedFieldType... 
mfts) { + Map lookup = new HashMap<>(mfts.length); + for (MappedFieldType mft : mfts) { + assertNull(lookup.put(mft.name(), mft)); + } + return buildIndexSort(indexSettings, lookup); + } + + private Sort buildIndexSort(IndexSettings indexSettings, Map lookup) { IndexSortConfig config = indexSettings.getIndexSortConfig(); assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); + IndicesFieldDataCache cache = new IndicesFieldDataCache(indexSettings.getSettings(), null); NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService, null); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - config.buildIndexSort( - field -> mft, - (ft, s) -> indexFieldDataService.getForField(ft, "index", s)); - - assertWarnings("Index sort for index [test] defined on field [field] which resolves to field [aliased]. " + - "You will not be able to define an index sort over aliased fields in new indexes"); + IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService, null); + return config.buildIndexSort(lookup::get, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)); } } diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java new file mode 100644 index 0000000000000..ac7b9ad62d746 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java @@ -0,0 +1,565 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index; + +import io.github.nik9000.mapmatcher.MapMatcher; + +import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.MapXContentParser; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.TimeSeriesIdGenerator.ObjectComponent; +import org.elasticsearch.index.mapper.IpFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; + +public class TimeSeriesIdGeneratorTests extends ESTestCase { + /** + * Test with non-randomized string for sanity checking. 
+ */ + public void testStrings() throws IOException { + Map doc = Map.of("a", "foo", "b", "bar", "c", "baz", "o", Map.of("e", "bort")); + assertMap( + TimeSeriesIdGenerator.parse(keywordTimeSeriesIdGenerator().generate(parser(doc)).streamInput()), + matchesMap().entry("a", "foo").entry("o.e", "bort") + ); + } + + public void testKeywordTooLong() throws IOException { + Map doc = Map.of("a", "more_than_1024_bytes".repeat(52)); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> keywordTimeSeriesIdGenerator().generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: longer than [1024] bytes [1040]")); + } + + public void testKeywordTooLongUtf8() throws IOException { + String theWordLong = "長い"; + Map doc = Map.of("a", theWordLong.repeat(200)); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> keywordTimeSeriesIdGenerator().generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: longer than [1024] bytes [1200]")); + } + + public void testKeywordNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> keywordTimeSeriesIdGenerator().generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: null values not allowed")); + } + + private TimeSeriesIdGenerator keywordTimeSeriesIdGenerator() { + return new TimeSeriesIdGenerator( + new ObjectComponent(Map.of("a", keywordComponent(), "o", new ObjectComponent(Map.of("e", keywordComponent())))) + ); + } + + /** + * Test with non-randomized longs for sanity checking. 
+ */ + public void testLong() throws IOException { + Map doc = Map.of("a", 1, "b", -1, "c", "baz", "o", Map.of("e", "1234")); + assertMap( + TimeSeriesIdGenerator.parse(timeSeriedIdForNumberType(NumberType.LONG).generate(parser(doc)).streamInput()), + matchesMap().entry("a", 1L).entry("o.e", 1234L) + ); + } + + public void testLongInvalidString() throws IOException { + Map doc = Map.of("a", "not_a_long"); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.LONG).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: For input string: \"not_a_long\"")); + } + + public void testLongNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.LONG).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), startsWith("error extracting dimension [a]: null values not allowed")); + } + + /** + * Test with non-randomized integers for sanity checking. 
+ */ + public void testInteger() throws IOException { + Map doc = Map.of("a", 1, "b", -1, "c", "baz", "o", Map.of("e", Integer.MIN_VALUE)); + assertMap( + TimeSeriesIdGenerator.parse(timeSeriedIdForNumberType(NumberType.INTEGER).generate(parser(doc)).streamInput()), + matchesMap().entry("a", 1L).entry("o.e", (long) Integer.MIN_VALUE) + ); + } + + public void testIntegerInvalidString() throws IOException { + Map doc = Map.of("a", "not_an_int"); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.INTEGER).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: For input string: \"not_an_int\"")); + } + + public void testIntegerOutOfRange() throws IOException { + Map doc = Map.of("a", Long.MAX_VALUE); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.INTEGER).generate(parser(doc)).streamInput() + ); + assertThat( + e.getMessage(), + startsWith("error extracting dimension [a]: Numeric value (" + Long.MAX_VALUE + ") out of range of int") + ); + } + + public void testIntegerNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.INTEGER).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), startsWith("error extracting dimension [a]: null values not allowed")); + } + + /** + * Test with non-randomized shorts for sanity checking. 
+ */ + public void testShort() throws IOException { + Map doc = Map.of("a", 1, "b", -1, "c", "baz", "o", Map.of("e", (int) Short.MIN_VALUE)); + assertMap( + TimeSeriesIdGenerator.parse(timeSeriedIdForNumberType(NumberType.SHORT).generate(parser(doc)).streamInput()), + matchesMap().entry("a", 1L).entry("o.e", (long) Short.MIN_VALUE) + ); + } + + public void testShortInvalidString() throws IOException { + Map doc = Map.of("a", "not_a_short"); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.SHORT).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: For input string: \"not_a_short\"")); + } + + public void testShortOutOfRange() throws IOException { + Map doc = Map.of("a", Long.MAX_VALUE); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.SHORT).generate(parser(doc)).streamInput() + ); + assertThat( + e.getMessage(), + startsWith("error extracting dimension [a]: Numeric value (" + Long.MAX_VALUE + ") out of range of int") + ); + } + + public void testShortNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.SHORT).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), startsWith("error extracting dimension [a]: null values not allowed")); + } + + /** + * Test with non-randomized bytes for sanity checking.
+ */ + public void testByte() throws IOException { + Map doc = Map.of("a", 1, "b", -1, "c", "baz", "o", Map.of("e", (int) Byte.MIN_VALUE)); + assertMap( + TimeSeriesIdGenerator.parse(timeSeriedIdForNumberType(NumberType.BYTE).generate(parser(doc)).streamInput()), + matchesMap().entry("a", 1L).entry("o.e", (long) Byte.MIN_VALUE) + ); + } + + public void testByteInvalidString() throws IOException { + Map doc = Map.of("a", "not_a_byte"); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.BYTE).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: For input string: \"not_a_byte\"")); + } + + public void testByteOutOfRange() throws IOException { + Map doc = Map.of("a", Long.MAX_VALUE); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.BYTE).generate(parser(doc)).streamInput() + ); + assertThat( + e.getMessage(), + startsWith("error extracting dimension [a]: Numeric value (" + Long.MAX_VALUE + ") out of range of int") + ); + } + + public void testByteNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> timeSeriedIdForNumberType(NumberType.BYTE).generate(parser(doc)).streamInput() + ); + assertThat(e.getMessage(), startsWith("error extracting dimension [a]: null values not allowed")); + } + + private TimeSeriesIdGenerator timeSeriedIdForNumberType(NumberType numberType) { + return new TimeSeriesIdGenerator( + new ObjectComponent( + Map.of( + "a", + numberType.timeSeriesIdGenerator(null, true), + "o", + new ObjectComponent(Map.of("e", numberType.timeSeriesIdGenerator(null, true))) + ) + ) + ); + } + + /** + * Test with non-randomized ips for sanity checking. 
+ */ + public void testIp() throws IOException { + Map doc = Map.of("a", "192.168.0.1", "b", -1, "c", "baz", "o", Map.of("e", "255.255.255.1")); + assertMap( + TimeSeriesIdGenerator.parse(timeSeriedIdForIp().generate(parser(doc)).streamInput()), + matchesMap().entry("a", "192.168.0.1").entry("o.e", "255.255.255.1") + ); + } + + public void testIpInvalidString() throws IOException { + Map doc = Map.of("a", "not_an_ip"); + Exception e = expectThrows(IllegalArgumentException.class, () -> timeSeriedIdForIp().generate(parser(doc)).streamInput()); + assertThat(e.getMessage(), equalTo("error extracting dimension [a]: 'not_an_ip' is not an IP string literal.")); + } + + public void testIpNull() throws IOException { + Map doc = new HashMap<>(); + doc.put("a", null); + Exception e = expectThrows(IllegalArgumentException.class, () -> timeSeriedIdForIp().generate(parser(doc)).streamInput()); + assertThat(e.getMessage(), startsWith("error extracting dimension [a]: null values not allowed")); + } + + private TimeSeriesIdGenerator timeSeriedIdForIp() { + return new TimeSeriesIdGenerator( + new ObjectComponent( + Map.of( + "a", + IpFieldMapper.timeSeriesIdGenerator(null), + "o", + new ObjectComponent(Map.of("e", IpFieldMapper.timeSeriesIdGenerator(null))) + ) + ) + ); + } + + /** + * Tests when the total of the tsid is more than 32k. + */ + public void testVeryLarge() { + String large = "many words ".repeat(50); + Map doc = new HashMap<>(); + Map components = new HashMap<>(); + for (int i = 0; i < 100; i++) { + doc.put("d" + i, large); + components.put("d" + i, keywordComponent()); + } + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(new ObjectComponent(components)); + Exception e = expectThrows(IllegalArgumentException.class, () -> gen.generate(parser(doc))); + assertThat(e.getMessage(), equalTo("tsid longer than [32766] bytes [55691]")); + } + + /** + * Sending the same document twice produces the same value. 
+ */ + public void testSameGenConsistentForSameDoc() throws IOException { + Map doc = randomDoc(between(1, 100), between(0, 2)); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(randomDimensionsFromDoc(doc))); + assertThat(gen.generate(parser(doc)), equalTo(gen.generate(parser(doc)))); + } + + /** + * Non dimension fields don't influence the value of the dimension. + */ + public void testExtraFieldsDoNotMatter() throws IOException { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + assertThat(gen.generate(parser(dimensions)), equalTo(gen.generate(parser(doc)))); + } + + /** + * The order that the dimensions appear in the document do not influence the value. + */ + public void testOrderDoesNotMatter() throws IOException { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + assertThat(gen.generate(parser(shuffled(doc))), equalTo(gen.generate(parser(doc)))); + } + + /** + * Dimensions that appear in the generator but not in the document don't influence the value. + */ + public void testUnusedExtraDimensions() throws IOException { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator small = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + dimensions.put(randomValueOtherThanMany(doc::containsKey, () -> randomAlphaOfLength(5)), randomAlphaOfLength(3)); + TimeSeriesIdGenerator large = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + + assertThat(large.generate(parser(doc)), equalTo(small.generate(parser(doc)))); + } + + /** + * Different values for dimensions change the result. 
+ */ + public void testDifferentValues() throws IOException { + Map orig = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(orig); + Map modified = modifyDimensionValue(orig, dimensions); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + assertThat(gen.generate(parser(modified)), not(equalTo(gen.generate(parser(orig))))); + } + + public void testParse() throws IOException { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + assertMap(TimeSeriesIdGenerator.parse(gen.generate(parser(doc)).streamInput()), expectedParsedDimensions(dimensions)); + assertMap(TimeSeriesIdGenerator.parse(gen.generate(parser(shuffled(doc))).streamInput()), expectedParsedDimensions(dimensions)); + } + + private MapMatcher expectedParsedDimensions(Map originalDimensions) { + return flatten(matchesMap(), null, originalDimensions); + } + + private MapMatcher flatten(MapMatcher result, String name, Map originalDimensions) { + for (Map.Entry d : originalDimensions.entrySet()) { + String nextName = name == null ? d.getKey().toString() : name + "." + d.getKey(); + if (d.getValue() instanceof Map) { + result = flatten(result, nextName, (Map) d.getValue()); + } else { + result = result.entry(nextName, d.getValue()); + } + } + return result; + } + + private Map modifyDimensionValue(Map doc, Map dimensions) { + Object keyToModify = randomFrom(dimensions.keySet()); + + Map result = new LinkedHashMap<>(doc.size()); + for (Map.Entry e : doc.entrySet()) { + if (e.getKey().equals(keyToModify)) { + Object val = e.getValue(); + Object modified = val instanceof Map + ? 
modifyDimensionValue((Map) val, (Map) dimensions.get(e.getKey())) + : val + "modified"; + result.put(e.getKey().toString(), modified); + } else { + result.put(e.getKey().toString(), e.getValue()); + } + } + return result; + } + + /** + * Two documents with the same *values* but different dimension keys will generate + * different {@code _tsid}s. + */ + public void testDifferentDimensions() throws IOException { + Map origDoc = randomDoc(between(1, 10), between(0, 2)); + Map origDimensions = randomDimensionsFromDoc(origDoc); + TimeSeriesIdGenerator origGen = new TimeSeriesIdGenerator(objectComponentForDimensions(origDimensions)); + Tuple, Map> modified = modifyDimensionName(origDoc, origDimensions); + TimeSeriesIdGenerator modGen = new TimeSeriesIdGenerator(objectComponentForDimensions(modified.v2())); + assertThat(modGen.generate(parser(modified.v1())), not(equalTo(origGen.generate(parser(origDoc))))); + } + + private Tuple, Map> modifyDimensionName(Map doc, Map dimensions) { + Object keyToModify = randomFrom(dimensions.keySet()); + + Map modifiedDoc = new LinkedHashMap<>(doc.size()); + Map modifiedDimensions = new LinkedHashMap<>(doc.size()); + for (Map.Entry e : doc.entrySet()) { + if (e.getKey().equals(keyToModify)) { + if (e.getValue() instanceof Map) { + Tuple, Map> modifiedSub = modifyDimensionName( + (Map) e.getValue(), + (Map) dimensions.get(e.getKey()) + ); + modifiedDoc.put(e.getKey().toString(), modifiedSub.v1()); + modifiedDimensions.put(e.getKey().toString(), modifiedSub.v2()); + } else { + String modifiedKey = e.getKey() + "modified"; + modifiedDoc.put(modifiedKey, e.getValue()); + modifiedDimensions.put(modifiedKey, e.getValue()); + } + } else { + modifiedDoc.put(e.getKey().toString(), e.getValue()); + if (dimensions.containsKey(e.getKey())) { + modifiedDimensions.put(e.getKey().toString(), e.getValue()); + } + } + } + return new Tuple<>(modifiedDoc, modifiedDimensions); + } + + /** + * Documents with fewer dimensions have a different value. 
+ */ + public void testFewerDimensions() throws IOException { + Map orig = randomDoc(between(2, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(orig, 2, 10); + Map modified = removeDimension(orig, dimensions); + TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + assertThat(gen.generate(parser(modified)), not(equalTo(gen.generate(parser(orig))))); + } + + /** + * Removes one of the dimensions from a document. + */ + private Map removeDimension(Map doc, Map dimensions) { + Object keyToRemove = randomFrom(dimensions.keySet()); + + Map result = new LinkedHashMap<>(doc.size()); + for (Map.Entry e : doc.entrySet()) { + if (e.getKey().equals(keyToRemove)) { + // If the dimension is an object then randomly remove it entirely or one of its leaf values + if (e.getValue() instanceof Map && randomBoolean()) { + result.put(e.getKey().toString(), removeDimension((Map) e.getValue(), (Map) dimensions.get(e.getKey()))); + } + } else { + result.put(e.getKey().toString(), e.getValue()); + } + } + return result; + } + + private LinkedHashMap randomDoc(int count, int subDepth) { + int keyLength = (int) Math.log(count) + 1; + LinkedHashMap doc = new LinkedHashMap<>(count); + for (int i = 0; i < count; i++) { + String key = randomValueOtherThanMany(doc::containsKey, () -> randomAlphaOfLength(keyLength)); + Object sub = subDepth <= 0 || randomBoolean() ? randomAlphaOfLength(5) : randomDoc(count, subDepth - 1); + doc.put(key, sub); + } + return doc; + } + + /** + * Extract a random subset of a document to use as dimensions. + */ + private LinkedHashMap randomDimensionsFromDoc(Map doc) { + return randomDimensionsFromDoc(doc, 1, 10); + } + + /** + * Extract a random subset of a document to use as dimensions. 
+ */ + private LinkedHashMap randomDimensionsFromDoc(Map doc, int min, int max) { + LinkedHashMap result = new LinkedHashMap<>(); + int dimensionCount = between(min, Math.min(doc.size(), max)); + for (Map.Entry dim : randomSubsetOf(dimensionCount, doc.entrySet())) { + Object sub = dim.getValue() instanceof Map ? randomDimensionsFromDoc((Map) dim.getValue()) : dim.getValue(); + result.put(dim.getKey().toString(), sub); + } + return result; + } + + private TimeSeriesIdGenerator.ObjectComponent objectComponentForDimensions(Map docDimensions) { + Map subs = new HashMap<>(docDimensions.size()); + for (Map.Entry dim : docDimensions.entrySet()) { + subs.put(dim.getKey().toString(), componentForRepresentativeValue(dim.getValue())); + } + return new TimeSeriesIdGenerator.ObjectComponent(subs); + } + + private TimeSeriesIdGenerator.Component componentForRepresentativeValue(Object value) { + if (value instanceof Map) { + return objectComponentForDimensions((Map) value); + } + if (value instanceof String) { + try { + InetAddresses.forString((String) value); + return IpFieldMapper.timeSeriesIdGenerator(null); + } catch (IllegalArgumentException e) { + return keywordComponent(); + } + } + if (value instanceof Number) { + return NumberFieldMapper.NumberType.LONG.timeSeriesIdGenerator(null, false); + } + throw new IllegalArgumentException("Unknown dimension type [" + value + "][" + value.getClass() + "]"); + } + + private TimeSeriesIdGenerator.Component keywordComponent() { + return KeywordFieldMapper.timeSeriesIdGenerator(null); + } + + private XContentParser parser(Map doc) throws IOException { + // Convert the map to json so the parsers don't choke on the methods MapXContentParser doesn't implement + return createParser( + JsonXContent.contentBuilder() + .copyCurrentStructure( + new MapXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + doc, + randomFrom(XContentType.values()) + ) + ) + ); + } + + private LinkedHashMap shuffled(Map orig) { 
+ List> entries = new ArrayList<>(orig.entrySet()); + Collections.shuffle(entries, random()); + LinkedHashMap result = new LinkedHashMap(orig.size()); + for (Map.Entry e : entries) { + Object sub = e.getValue() instanceof Map ? shuffled((Map) e.getValue()) : e.getValue(); + result.put(e.getKey().toString(), sub); + } + return result; + } +} diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 844ce55b5548f..63f5f9e7fe44b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -16,18 +16,29 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.MapXContentParser; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService.MergeReason; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static io.github.nik9000.mapmatcher.ListMatcher.matchesList; +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -58,7 +69,7 @@ public void testAddFields() throws Exception { assertThat(stage1.mappers().getMapper("obj1.prop1"), nullValue()); // but merged should DocumentParser documentParser = new DocumentParser(null, null, null, null); - DocumentMapper mergedMapper = new DocumentMapper(documentParser, merged); + DocumentMapper mergedMapper = new DocumentMapper(documentParser, merged, false); assertThat(mergedMapper.mappers().getMapper("age"), notNullValue()); assertThat(mergedMapper.mappers().getMapper("obj1.prop1"), notNullValue()); } @@ -284,8 +295,59 @@ public void testEmptyDocumentMapper() { assertNotNull(documentMapper.idFieldMapper()); assertNotNull(documentMapper.sourceMapper()); assertNotNull(documentMapper.IndexFieldMapper()); - assertEquals(10, documentMapper.mappers().getMapping().getMetadataMappersMap().size()); - assertEquals(10, documentMapper.mappers().getMatchingFieldNames("*").size()); + List> metadataMappers = new ArrayList<>(documentMapper.mappers().getMapping().getMetadataMappersMap().keySet()); + Collections.sort(metadataMappers, Comparator.comparing(c -> c.getSimpleName())); + assertMap( + metadataMappers, + matchesList().item(DocCountFieldMapper.class) + .item(FieldNamesFieldMapper.class) + .item(IdFieldMapper.class) + .item(IgnoredFieldMapper.class) + .item(IndexFieldMapper.class) + .item(NestedPathFieldMapper.class) + .item(RoutingFieldMapper.class) + .item(SeqNoFieldMapper.class) + .item(SourceFieldMapper.class) + .item(TimeSeriesIdFieldMapper.class) + .item(VersionFieldMapper.class) + ); + List matching = new ArrayList<>(documentMapper.mappers().getMatchingFieldNames("*")); + Collections.sort(matching); + assertMap( + matching, + matchesList().item(DocCountFieldMapper.CONTENT_TYPE) + .item(FieldNamesFieldMapper.CONTENT_TYPE) + .item(IdFieldMapper.CONTENT_TYPE) + .item(IgnoredFieldMapper.CONTENT_TYPE) + .item(IndexFieldMapper.CONTENT_TYPE) + 
.item(NestedPathFieldMapper.NAME) + .item(RoutingFieldMapper.CONTENT_TYPE) + .item(SeqNoFieldMapper.CONTENT_TYPE) + .item(SourceFieldMapper.CONTENT_TYPE) + .item(TimeSeriesIdFieldMapper.CONTENT_TYPE) + .item(VersionFieldMapper.CONTENT_TYPE) + ); + } + + public void testContainsTimeSeriesGenerator() throws IOException { + DocumentMapper documentMapper = createMapperService( + Version.CURRENT, + Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + () -> false, + mapping(b -> b.startObject("dim").field("type", "keyword").field("dimension", true).endObject()) + ).documentMapper(); + assertThat( + TimeSeriesIdGenerator.parse(documentMapper.getTimeSeriesIdGenerator() + .generate( + new MapXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + Map.of("dim", "foo"), + randomFrom(XContentType.values()) + ) + ).streamInput()), + equalTo(Map.of("dim", "foo")) + ); } public void testTooManyDimensionFields() { diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index f9acded6340b5..d0ab317a55ba2 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; @@ -67,10 +68,18 @@ public Map getMetadataMappers() { } }); - private static final String[] EXPECTED_METADATA_FIELDS = new String[]{ IgnoredFieldMapper.NAME, IdFieldMapper.NAME, - RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, - 
NestedPathFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, DocCountFieldMapper.NAME, - FieldNamesFieldMapper.NAME }; + private static final String[] EXPECTED_METADATA_FIELDS = new String[] { + IgnoredFieldMapper.NAME, + IdFieldMapper.NAME, + RoutingFieldMapper.NAME, + TimeSeriesIdFieldMapper.NAME, + IndexFieldMapper.NAME, + SourceFieldMapper.NAME, + NestedPathFieldMapper.NAME, + VersionFieldMapper.NAME, + SeqNoFieldMapper.NAME, + DocCountFieldMapper.NAME, + FieldNamesFieldMapper.NAME }; public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); diff --git a/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java new file mode 100644 index 0000000000000..934ba51771b71 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java @@ -0,0 +1,220 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices; + +import org.apache.lucene.util.NamedThreadFactory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; +import org.elasticsearch.index.TimeSeriesIdGenerator.ObjectComponent; +import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.indices.TimeSeriesIdGeneratorService.LocalIndex; +import org.elasticsearch.test.ESTestCase; + +import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; + +public class TimeSeriesIdGeneratorServiceTests extends ESTestCase { + /** + * Assert that non-timeseries indices don't call any lookups or build + * anything and never have a generator. + */ + public void testNonTimeSeries() { + try ( + TimeSeriesIdGeneratorService genService = genService( + i -> { throw new AssertionError("shouldn't be called"); }, + im -> { throw new AssertionError("shouldn't be called"); } + ) + ) { + Metadata meta = Metadata.builder().put(index("index", false)).build(); + genService.applyClusterState(meta); + assertNull(genService.apply(meta.index("index"))); + genService.stop(); + } + } + + /** + * Assert that a local time series index loads the time series from the local lookup. 
+ */ + public void testLocalIndex() { + Metadata meta = Metadata.builder().put(index("index", true)).build(); + IndexMetadata indexMetadata = meta.index("index"); + TimeSeriesIdGenerator gen = mockGenerator(); + try (TimeSeriesIdGeneratorService genService = genService(i -> new LocalIndex() { + @Override + public long metadataVersion() { + return indexMetadata.getVersion() + between(0, Integer.MAX_VALUE); + } + + @Override + public TimeSeriesIdGenerator generator() { + return gen; + } + }, im -> { throw new AssertionError("shouldn't be called"); })) { + genService.applyClusterState(meta); + assertThat(genService.apply(indexMetadata), sameInstance(gen)); + genService.stop(); + } + } + + /** + * Assert that a local time series index will reuse the previous building if + * the mapping hasn't changed. + */ + public void testLocalIndexUnchangedMapping() { + TimeSeriesIdGenerator gen = mockGenerator(); + AtomicLong counter = new AtomicLong(); + + Metadata meta = Metadata.builder().put(index("index", true)).build(); + AtomicReference indexMetadata = new AtomicReference<>(meta.index("index")); + try (TimeSeriesIdGeneratorService genService = genService(i -> { + counter.incrementAndGet(); + return new LocalIndex() { + @Override + public long metadataVersion() { + return indexMetadata.get().getVersion() + between(0, Integer.MAX_VALUE); + } + + @Override + public TimeSeriesIdGenerator generator() { + return gen; + } + }; + }, im -> { throw new AssertionError("shouldn't be called"); })) { + for (int i = 0; i < 1000; i++) { + genService.applyClusterState(meta); + assertThat(genService.apply(indexMetadata.get()), sameInstance(gen)); + assertThat(counter.get(), equalTo(1L)); + } + + // Incrementing the mapping version will cause another fetch + meta = Metadata.builder() + .put(IndexMetadata.builder(indexMetadata.get()).mappingVersion(indexMetadata.get().getMappingVersion() + 1)) + .build(); + indexMetadata.set(meta.index("index")); + genService.applyClusterState(meta); + 
assertThat(genService.apply(indexMetadata.get()), sameInstance(gen)); + assertThat(counter.get(), equalTo(2L)); + genService.stop(); + } + } + + /** + * Assert that a non local time series index will build its {@link TimeSeriesIdGenerator}. + */ + public void testNonLocalIndex() throws Exception { + Metadata meta = Metadata.builder().put(index("index", true)).build(); + IndexMetadata indexMetadata = meta.index("index"); + TimeSeriesIdGenerator gen = mockGenerator(); + try (TimeSeriesIdGeneratorService genService = genService(i -> null, im -> gen)) { + genService.applyClusterState(meta); + assertBusy(() -> assertThat(genService.apply(indexMetadata), sameInstance(gen))); + genService.stop(); + } + } + + /** + * Assert that a non local time series index will reuse the previous building if + * the mapping hasn't changed. + */ + public void testNonLocalIndexUnchangedMapping() throws Exception { + TimeSeriesIdGenerator gen = mockGenerator(); + AtomicLong counter = new AtomicLong(); + + Metadata meta = Metadata.builder().put(index("index", true)).build(); + AtomicReference indexMetadata = new AtomicReference<>(meta.index("index")); + try (TimeSeriesIdGeneratorService genService = genService(i -> null, im -> { + counter.incrementAndGet(); + return gen; + })) { + for (int i = 0; i < 1000; i++) { + genService.applyClusterState(meta); + assertBusy(() -> assertThat(genService.apply(indexMetadata.get()), sameInstance(gen))); + assertThat(counter.get(), equalTo(1L)); + } + + // Incrementing the mapping version will cause another fetch + meta = Metadata.builder() + .put(IndexMetadata.builder(indexMetadata.get()).mappingVersion(indexMetadata.get().getMappingVersion() + 1)) + .build(); + indexMetadata.set(meta.index("index")); + genService.applyClusterState(meta); + assertBusy(() -> assertThat(genService.apply(indexMetadata.get()), sameInstance(gen))); + assertThat(counter.get(), equalTo(2L)); + genService.stop(); + } + } + + /** + * Assert that a non local time series index will 
reuse the previous building if + * the mapping hasn't changed. + */ + public void testNonLocalIndexSameMappingAsLocalIndex() throws Exception { + TimeSeriesIdGenerator gen = mockGenerator(); + + Metadata meta = Metadata.builder().put(index("index_1", true)).put(index("index_2", true)).build(); + try (TimeSeriesIdGeneratorService genService = genService(i -> { + if (i.getName().equals("index_1")) { + return new LocalIndex() { + @Override + public long metadataVersion() { + return meta.index("index_1").getVersion(); + } + + @Override + public TimeSeriesIdGenerator generator() { + return gen; + } + }; + } + return null; + }, im -> { throw new AssertionError("shouldn't be called"); })) { + genService.applyClusterState(meta); + assertThat(genService.apply(meta.index("index_1")), sameInstance(gen)); + assertThat(genService.apply(meta.index("index_2")), sameInstance(gen)); + } + } + + private TimeSeriesIdGeneratorService genService( + Function lookupLocalIndex, + Function buildTimeSeriedIdGenerator + ) { + ExecutorService executor = Executors.newSingleThreadExecutor(new NamedThreadFactory(getTestName())); + TimeSeriesIdGeneratorService genService = new TimeSeriesIdGeneratorService(executor, lookupLocalIndex, buildTimeSeriedIdGenerator); + genService.start(); + return genService; + } + + private IndexMetadata.Builder index(String index, boolean timeSeriesMode) { + Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT); + if (timeSeriesMode) { + settings.put(IndexSettings.TIME_SERIES_MODE.getKey(), true); + } + return IndexMetadata.builder(index) + .settings(settings) + .numberOfShards(between(1, 10)) + .numberOfReplicas(randomInt(20)) + .putMapping("{}"); + } + + private TimeSeriesIdGenerator mockGenerator() { + return new TimeSeriesIdGenerator(new ObjectComponent(Map.of("a", KeywordFieldMapper.timeSeriesIdGenerator(null)))); + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java 
b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 78506b87f07e5..a71fc54f02d8f 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1884,7 +1884,8 @@ protected void assertSnapshotOrGenericThread() { actionFilters, indexNameExpressionResolver, new IndexingPressure(settings), - EmptySystemIndices.INSTANCE + EmptySystemIndices.INSTANCE, + indicesService ) ); final TransportShardBulkAction transportShardBulkAction = new TransportShardBulkAction( From f811ce4aec0a1a2d0c54775fd2bba5139961e0e4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 22 Jul 2021 20:43:26 -0400 Subject: [PATCH 02/29] Better support for feature flag --- qa/mixed-cluster/build.gradle | 1 + qa/smoke-test-multinode/build.gradle | 1 + .../test/smoke_test_multinode/20_tsdb_consistency.yml | 1 + rest-api-spec/build.gradle | 1 + server/src/main/java/org/elasticsearch/index/IndexSettings.java | 2 +- 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 562847cd9faa3..2d7d5bc04b98b 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -39,6 +39,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' } } diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 1830ef309b4fb..90590fbf756c9 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -25,6 +25,7 @@ testClusters.matching { it.name == "integTest" }.configureEach { testClusters.all { setting 'xpack.security.enabled', 'false' + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' } 
tasks.named("integTest").configure { diff --git a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml index 0fda1a234e0d2..147b2633a4a0d 100644 --- a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml +++ b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml @@ -9,6 +9,7 @@ setup: settings: index: time_series_mode: true + number_of_shards: 3 mappings: properties: "@timestamp": diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index f19ad81839b9e..7b8491d15ff4c 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -24,6 +24,7 @@ artifacts { testClusters.all { module ':modules:mapper-extras' + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' } tasks.named("test").configure { enabled = false } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 82846e2ec3a57..3a45cb2442c28 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -532,7 +532,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); this.mergePolicyConfig = new MergePolicyConfig(logger, this); - timeSeriesMode = scopedSettings.get(TIME_SERIES_MODE); + timeSeriesMode = isTimeSeriesModeEnabled() ? 
scopedSettings.get(TIME_SERIES_MODE) : false; this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); From 0fb80607c0ad791065caa898c82e311e69e5125e Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 26 Jul 2021 17:11:53 -0400 Subject: [PATCH 03/29] build build build --- qa/mixed-cluster/build.gradle | 4 +++- qa/smoke-test-multinode/build.gradle | 6 +++++- rest-api-spec/build.gradle | 5 ++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 2d7d5bc04b98b..b2aec391b17f7 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -39,7 +39,9 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' - systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + } } } diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 90590fbf756c9..d0725d257e1a9 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -6,6 +6,8 @@ * Side Public License, v 1. 
*/ +import org.elasticsearch.gradle.internal.info.BuildParams + apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -25,7 +27,9 @@ testClusters.matching { it.name == "integTest" }.configureEach { testClusters.all { setting 'xpack.security.enabled', 'false' - systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + } } tasks.named("integTest").configure { diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 28575b778d0ee..2b98a4ec53091 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -1,4 +1,5 @@ import org.elasticsearch.gradle.OS +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.build' apply plugin: 'elasticsearch.publish' @@ -24,7 +25,9 @@ artifacts { testClusters.all { module ':modules:mapper-extras' - systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.time_series_mode_feature_flag_registered', 'true' + } } tasks.named("test").configure { enabled = false } From fbb1838fee581198825d821946fb7c5fe23b0750 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 27 Jul 2021 12:00:20 -0400 Subject: [PATCH 04/29] Support time series index without dimensions You can't write any documents into the index, but you can create one. This is useful because you might want to create an index and then use dynamic mappings and templates to fill in the dimensions later. 
--- .../rest-api-spec/test/tsdb/20_bad_config.yml | 37 ------------- .../test/tsdb/50_add_missing_dimensions.yml | 55 +++++++++++++++++++ ...nsion_types.yml => 60_dimension_types.yml} | 0 .../index/TimeSeriesIdGenerator.java | 15 +++-- .../index/TimeSeriesIdGeneratorTests.java | 8 +++ 5 files changed, 73 insertions(+), 42 deletions(-) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml rename rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/{50_dimension_types.yml => 60_dimension_types.yml} (100%) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml index f94d7f2506f20..321f0b757997d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml @@ -1,41 +1,4 @@ ---- -"no timestamp": - - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.15.0 - - - do: - catch: /unknown index sort field:\[@timestamp\] required by \[index\.time_series_mode\]/ - indices.create: - index: test - body: - settings: - index: - time_series_mode: true - mappings: - properties: - metricset: - type: keyword - dimension: true ---- -"no dimenions": - - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.15.0 - - - do: - catch: /Index configured with \[index.time_series_mode\] requires at least one field configured with \[dimension:true\]/ - indices.create: - index: test - body: - settings: - index: - time_series_mode: true - mappings: - properties: - "@timestamp": - type: date --- "dimension with ignore_above": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml 
new file mode 100644 index 0000000000000..a08ce7b22b2b7 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -0,0 +1,55 @@ +--- +"add dimensions after creation": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + + - do: + catch: "/Error building time series id: There aren't any mapped dimensions/" + index: + index: test + refresh: true + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: cat + + - do: + indices.put_mapping: + index: test + body: + properties: + metricset: + type: keyword + dimension: true + + - do: + index: + index: test + refresh: true + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: cat + + - do: + search: + index: test + body: + fields: + - field: _tsid + - field: "@timestamp" + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} + - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml similarity index 100% rename from rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_dimension_types.yml rename to rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java index 88adb3f3a9c5e..3613455f5929f 100644 --- a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java +++ b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -135,11 +135,13 @@ public final class TimeSeriesIdGenerator { public 
TimeSeriesIdGenerator(ObjectComponent root) { if (root == null) { - throw new IllegalArgumentException( - "Index configured with [" - + IndexSettings.TIME_SERIES_MODE.getKey() - + "] requires at least one field configured with [dimension:true]" - ); + /* + * This can happen if an index is configured in time series mode + * without any mapping. It's fine - we'll add dimensions later. + * For now it'll make a generator that will fail to index any + * documents. Which is totally ok. + */ + root = new ObjectComponent(Map.of()); } root.collectDimensionNames("", name -> { int bytes = UnicodeUtil.calcUTF16toUTF8Length(name, 0, name.length()); @@ -167,6 +169,9 @@ public BytesReference generate(XContentParser parser) throws IOException { if (values.isEmpty()) { List dimensionNames = new ArrayList<>(); root.collectDimensionNames("", dimensionNames::add); + if (dimensionNames.isEmpty()) { + throw new IllegalArgumentException("There aren't any mapped dimensions"); + } Collections.sort(dimensionNames); throw new IllegalArgumentException("Document must contain one of the dimensions " + dimensionNames); } diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java index ac7b9ad62d746..47d2344588f37 100644 --- a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java @@ -456,6 +456,14 @@ public void testFewerDimensions() throws IOException { assertThat(gen.generate(parser(modified)), not(equalTo(gen.generate(parser(orig))))); } + public void testEmpty() throws IOException { + Exception e = expectThrows( + IllegalArgumentException.class, + () -> new TimeSeriesIdGenerator(null).generate(parser(Map.of())).streamInput() + ); + assertThat(e.getMessage(), equalTo("There aren't any mapped dimensions")); + } + /** * Removes one of the dimensions from a document. 
*/ From a10c4b64904239a81b94490338a57d1a92044746 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 27 Jul 2021 13:25:44 -0400 Subject: [PATCH 05/29] Split --- .../test/tsdb/70_split_merge.yml | 114 ++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml new file mode 100644 index 0000000000000..ed8f56b9f793f --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml @@ -0,0 +1,114 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": 
"pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +split: + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.split: + index: test + target: test_split + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 6 + + - do: + search: + index: test_split + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + +--- +split: + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.join: + index: test + target: test_join + body: + settings: + index.number_of_shards: 1 + + - do: + search: + index: test_split + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" 
+k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} \ No newline at end of file From d3c6c9b47ead0da38a43cfb1d8ffafcc1453c0a4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 29 Jul 2021 09:54:16 -0400 Subject: [PATCH 06/29] Ouch --- .../upgrades/FullClusterRestartIT.java | 1 + .../elasticsearch/upgrades/IndexingIT.java | 2 + .../20_tsdb_consistency.yml | 196 +++++++- .../rest-api-spec/test/tsdb/20_bad_config.yml | 29 +- ...70_split_merge.yml => 70_index_resize.yml} | 62 ++- .../test/tsdb/80_term_vectors.yml | 93 ++++ .../indices/recovery/IndexRecoveryIT.java | 423 ++++++++++++------ .../IndexRecoveryInTimeSeriesModeIT.java | 60 +++ .../action/bulk/TransportShardBulkAction.java | 37 +- .../action/index/IndexRequest.java | 27 +- .../cluster/metadata/IndexMetadata.java | 2 + .../elasticsearch/index/IndexSettings.java | 33 +- .../index/TimeSeriesIdGenerator.java | 32 +- .../index/engine/LuceneChangesSnapshot.java | 12 + .../index/engine/TranslogDirectoryReader.java | 12 +- .../index/mapper/DocumentMapper.java | 11 +- .../index/mapper/MapperService.java | 9 +- .../elasticsearch/index/mapper/Mapping.java | 28 +- .../index/mapper/MappingLookup.java | 2 +- .../index/mapper/MappingParser.java | 8 +- .../index/mapper/SourceToParse.java | 52 ++- .../index/mapper/TimeSeriesIdFieldMapper.java | 10 +- .../elasticsearch/index/shard/IndexShard.java | 26 +- .../index/termvectors/TermVectorsService.java | 8 +- .../indices/TimeSeriesIdGeneratorService.java | 12 +- .../MetadataRolloverServiceTests.java | 2 +- .../bulk/TransportShardBulkActionTests.java | 32 +- .../index/MappingUpdatedActionTests.java | 2 +- .../index/TimeSeriesIdGeneratorTests.java | 30 +- .../index/mapper/DocumentMapperTests.java | 2 +- .../index/mapper/DocumentParserTests.java | 22 +- .../index/mapper/DynamicTemplatesTests.java | 14 +- .../FieldAliasMapperValidationTests.java | 7 +- 
.../index/mapper/MappingLookupTests.java | 7 +- .../index/mapper/MappingParserTests.java | 4 +- .../index/mapper/RoutingFieldMapperTests.java | 4 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 80 ++++ .../query/SearchExecutionContextTests.java | 7 +- .../index/shard/ShardGetServiceTests.java | 129 +++++- .../IndexingMemoryControllerTests.java | 3 +- .../TimeSeriesIdGeneratorServiceTests.java | 109 ++++- .../index/engine/TranslogHandler.java | 13 +- .../index/mapper/MapperServiceTestCase.java | 24 +- .../index/shard/IndexShardTestCase.java | 18 +- .../elasticsearch/test/BackgroundIndexer.java | 18 +- .../resources/rest-api-spec/test/ccr/tsdb.yml | 1 + .../SourceOnlySnapshotShardTests.java | 55 ++- .../test/security/authz/70_tsdb.yml | 1 + 48 files changed, 1438 insertions(+), 333 deletions(-) rename rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/{70_split_merge.yml => 70_index_resize.yml} (77%) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java create mode 100644 x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1c4c972b83347..145167b574248 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -1633,4 +1633,5 @@ public static void assertNumHits(String index, int numHits, int 
totalShards) thr assertThat(extractTotalHits(resp), equalTo(numHits)); } + // NOCOMMIT tsdb smoke test } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index ea09cb95f7d62..c99e2412813b4 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -258,4 +258,6 @@ private Version minNodeVersion() throws IOException { } return minNodeVersion; } + + // NOCOMMIT tsdb smoke test } diff --git a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml index 147b2633a4a0d..3229c80a49fa1 100644 --- a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml +++ b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml @@ -10,6 +10,7 @@ setup: index: time_series_mode: true number_of_shards: 3 + number_of_replicas: 1 mappings: properties: "@timestamp": @@ -56,6 +57,16 @@ setup: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' +--- +"index with replicas and shards is green": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + cluster.health: + wait_for_status: green + - match: { status: green } --- "each shard has unique _tsids": @@ -85,7 +96,6 @@ setup: } } return timeSeries; - - match: {hits.total.value: 8} - length: {aggregations.check.value: 2} @@ -127,6 +137,190 @@ setup: } } return timeSeries; + - match: {hits.total.value: 8} + - length: 
{aggregations.check.value: 2} + +--- +split: + - skip: + version: all + reason: shard splitting doesn't work yet + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.split: + index: test + target: test_split + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 6 + - do: + search: + index: test_split + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + + - do: + search: + index: test_split + body: + size: 0 + aggs: + check: + scripted_metric: + init_script: "state.timeSeries = new HashSet()" + map_script: "state.timeSeries.add(doc._tsid.value)" + combine_script: "return state.timeSeries" + reduce_script: | + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + " appeared in two shards"); + } + } + } + return timeSeries; + - match: {hits.total.value: 8} + - length: {aggregations.check.value: 2} + +--- +shrink: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: "arbitrary_key" + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + index.routing.allocation.include._id: $node_id + + - do: + cluster.health: + wait_for_status: green + wait_for_no_relocating_shards: true + index: test + + - do: + indices.shrink: + index: test + target: test_shrink + body: + settings: + index.number_of_shards: 1 + + - do: + search: + index: test_shrink + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - 
match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + + - do: + search: + index: test_shrink + body: + size: 0 + aggs: + check: + scripted_metric: + init_script: "state.timeSeries = new HashSet()" + map_script: "state.timeSeries.add(doc._tsid.value)" + combine_script: "return state.timeSeries" + reduce_script: | + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + " appeared in two shards"); + } + } + } + return timeSeries; + - match: {hits.total.value: 8} + - length: {aggregations.check.value: 2} + +--- +clone: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.clone: + index: test + target: test_clone + + - do: + search: + index: test_clone + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + + - do: + search: + index: test_clone + body: + size: 0 + aggs: + check: + scripted_metric: + init_script: "state.timeSeries = new HashSet()" + map_script: "state.timeSeries.add(doc._tsid.value)" + combine_script: "return state.timeSeries" + reduce_script: | + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + " appeared in two shards"); + } + } + } + return timeSeries; - match: {hits.total.value: 8} - length: {aggregations.check.value: 2} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml 
index 321f0b757997d..dc1d02c4d028b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml @@ -1,5 +1,3 @@ - - --- "dimension with ignore_above": - skip: @@ -100,3 +98,30 @@ metricset: type: keyword dimension: true + +--- +"routing_partition_size unsupported": + # routing_partition_size does the opposite of what we're going for here..... + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[index.time_series_mode\] is incompatible with \[index.routing_partition_size\]/ + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + number_of_shards: 10 + routing_partition_size: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + +# TODO on backport do not support turning off soft deletes with tsdb - we won't have good test coverage for it. 
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml similarity index 77% rename from rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml rename to rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml index ed8f56b9f793f..586783af0d352 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_split_merge.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml @@ -54,14 +54,19 @@ setup: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' ---- -split: - do: indices.put_settings: index: test body: index.blocks.write: true +--- +split: + - skip: + version: all + reason: shard splitting doesn't work yet + features: "arbitrary_key" + - do: indices.split: index: test @@ -85,24 +90,65 @@ split: - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- -split: +shrink: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: "arbitrary_key" + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + - do: indices.put_settings: index: test body: - index.blocks.write: true + index.routing.allocation.include._id: $node_id + + - do: + cluster.health: + wait_for_status: green + wait_for_no_relocating_shards: true + index: test - do: - indices.join: + indices.shrink: index: test - target: test_join + target: test_shrink body: settings: index.number_of_shards: 1 - do: search: - index: test_split + index: test_shrink + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:03:24.467Z" 
+k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + +--- +clone: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.clone: + index: test + target: test_clone + + - do: + search: + index: test_clone body: fields: - field: _tsid @@ -111,4 +157,4 @@ split: query: '+@timestamp:"2021-04-28T18:03:24.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} \ No newline at end of file + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml new file mode 100644 index 0000000000000..e822f73815d9e --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml @@ -0,0 +1,93 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "words": "there are some words here", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + +--- +for an indexed document: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + 
index: test + - match: {hits.total.value: 1} + + - do: + termvectors: + index: test + id: $body.hits.hits.0._id + fields: words + - match: {term_vectors.words.field_statistics.sum_doc_freq: 5} + - match: {term_vectors.words.terms.are.tokens.0.position: 1} + - match: {term_vectors.words.terms.here.tokens.0.position: 4} + - match: {term_vectors.words.terms.some.tokens.0.position: 2} + - match: {term_vectors.words.terms.there.tokens.0.position: 0} + - match: {term_vectors.words.terms.words.tokens.0.position: 3} + +--- +for an artificial document: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + termvectors: + index: test + fields: words + body: + doc: + "@timestamp": "2021-04-28T18:35:24.467Z" + words: there are some words here + metricset: pod + k8s: + pod: + name: cat + uid: 947e4ced-1786-4e53-9e0c-5c447e959507 + ip: 10.10.55.1 + network: + tx: 2001818691 + rx: 802133794 + - match: {term_vectors.words.field_statistics.sum_doc_freq: 5} + - match: {term_vectors.words.terms.are.tokens.0.position: 1} + - match: {term_vectors.words.terms.here.tokens.0.position: 4} + - match: {term_vectors.words.terms.some.tokens.0.position: 2} + - match: {term_vectors.words.terms.there.tokens.0.position: 0} + - match: {term_vectors.words.terms.words.tokens.0.position: 3} + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index bc0cc6e434211..3093e81c25b9f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -53,9 +53,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -144,7 +144,7 @@ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexRecoveryIT extends ESIntegTestCase { - private static final String INDEX_NAME = "test-idx-1"; + protected static final String INDEX_NAME = "test-idx-1"; private static final String REPO_NAME = "test-repo-1"; private static final String SNAP_NAME = "test-snap-1"; @@ -266,14 +266,19 @@ public void testGatewayRecoveryTestActiveOnly() throws Exception { public void testReplicaRecovery() throws Exception { final String nodeA = internalCluster().startNode(); - createIndex(INDEX_NAME, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) - .build()); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); ensureGreen(INDEX_NAME); final int numOfDocs = scaledRandomIntBetween(0, 200); - try (BackgroundIndexer indexer = new BackgroundIndexer(INDEX_NAME, "_doc", client(), numOfDocs)) { + try (BackgroundIndexer indexer = backgroundIndexer(numOfDocs)) { waitForDocs(numOfDocs, indexer); } @@ -288,8 +293,15 @@ public void testReplicaRecovery() throws Exception { // force a shard recovery from nodeA to nodeB final String nodeB = internalCluster().startNode(); - assertAcked(client().admin().indices().prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))); + assertAcked( + 
client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ); ensureGreen(INDEX_NAME); final RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet(); @@ -332,29 +344,41 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio final String nodeA = internalCluster().startNode(); logger.info("--> create index on node: {}", nodeA); - createIndex(INDEX_NAME, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms").build()); + assertAcked( + prepareCreate(INDEX_NAME).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); int numDocs = randomIntBetween(10, 200); final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { docs[i] = client().prepareIndex(INDEX_NAME). 
- setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + setSource(source(Map.of("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()))); } - indexRandom(randomBoolean(), docs); + indexRandom(randomBoolean(), dummyDocuments(), docs); logger.info("--> start node B"); // force a shard recovery from nodeA to nodeB final String nodeB = internalCluster().startNode(); logger.info("--> add replica for {} on node: {}", INDEX_NAME, nodeB); - assertAcked(client().admin().indices().prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0))); + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), 0) + ) + ); ensureGreen(INDEX_NAME); logger.info("--> start node C"); @@ -534,8 +558,7 @@ public void testRerouteRecovery() throws Exception { assertBusy(() -> assertNodeHasThrottleTimeAndNoRecoveries.accept(nodeB)); logger.info("--> bump replica count"); - client().admin().indices().prepareUpdateSettings(INDEX_NAME) - .setSettings(Settings.builder().put("number_of_replicas", 1)).execute().actionGet(); + client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(Settings.builder().put("number_of_replicas", 1)).get(); ensureGreen(); assertBusy(() -> assertNodeHasThrottleTimeAndNoRecoveries.accept(nodeA)); @@ -686,11 +709,20 @@ private List findRecoveriesForTargetNode(String nodeName, List creating test index: {}", name); - assertAcked(prepareCreate(name, nodeCount, Settings.builder().put("number_of_shards", shardCount) - .put("number_of_replicas", replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0))); + assertAcked( + prepareCreate( + 
name, + nodeCount, + Settings.builder() + .put("number_of_shards", shardCount) + .put("number_of_replicas", replicaCount) + .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ) + .setMapping(minimalMapping()) + ); ensureGreen(); logger.info("--> indexing sample data"); @@ -698,13 +730,11 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { - docs[i] = client().prepareIndex(name). - setSource("foo-int", randomInt(), - "foo-string", randomAlphaOfLength(32), - "foo-float", randomFloat()); + docs[i] = client().prepareIndex(name) + .setSource(source(Map.of("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()))); } - indexRandom(true, docs); + indexRandom(true, dummyDocuments(), docs); flush(); assertThat(client().prepareSearch(name).setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs)); return client().admin().indices().prepareStats(name).execute().actionGet(); @@ -736,13 +766,15 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get(); assertThat(response.isTimedOut(), is(false)); - client().admin().indices().prepareCreate(indexName) - .setSettings( + assertAcked( + prepareCreate(indexName).setSettings( Settings.builder() .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ).get(); + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(100, 8000); @@ -750,16 +782,16 @@ public void 
testTransientErrorsDuringRecoveryAreRetried() throws Exception { // is a mix of file chunks and translog ops int threeFourths = (int) (numDocs * 0.75); for (int i = 0; i < threeFourths; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON)); } - indexRandom(true, requests); + indexRandom(true, dummyDocuments(), requests); flush(indexName); requests.clear(); for (int i = threeFourths; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON)); } - indexRandom(true, requests); + indexRandom(true, dummyDocuments(), requests); ensureSearchable(indexName); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); @@ -916,21 +948,22 @@ public void testDisconnectsWhileRecovering() throws Exception { ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get(); assertThat(response.isTimedOut(), is(false)); - - client().admin().indices().prepareCreate(indexName) - .setSettings( - Settings.builder() - .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ).get(); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", 
XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON)); } - indexRandom(true, requests); + indexRandom(true, dummyDocuments(), requests); ensureSearchable(indexName); ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); @@ -1028,20 +1061,22 @@ public void testDisconnectsDuringRecovery() throws Exception { final String redNodeName = internalCluster() .startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); - client().admin().indices().prepareCreate(indexName) - .setSettings( + assertAcked( + prepareCreate(indexName).setSettings( Settings.builder() .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - ).get(); + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); List requests = new ArrayList<>(); int numDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numDocs; i++) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON)); } - indexRandom(true, requests); + indexRandom(true, dummyDocuments(), requests); ensureSearchable(indexName); assertHitCount(client().prepareSearch(indexName).get(), numDocs); @@ -1136,19 +1171,24 @@ public void testHistoryRetention() throws Exception { internalCluster().startNodes(3); final String indexName = "test"; - client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0)).get(); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) + .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); ensureGreen(indexName); // Perform some replicated operations so the replica isn't simply empty, because ops-based recovery isn't better in that case final List requests = new ArrayList<>(); final int replicatedDocCount = scaledRandomIntBetween(25, 250); while (requests.size() < replicatedDocCount) { - requests.add(client().prepareIndex(indexName).setSource("{}", XContentType.JSON)); + requests.add(client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON)); } - indexRandom(true, requests); + indexRandom(true, dummyDocuments(), requests); if (randomBoolean()) { flush(indexName); } @@ -1168,7 +1208,7 @@ public void testHistoryRetention() throws Exception { final int numNewDocs = scaledRandomIntBetween(25, 250); for (int i = 0; i < numNewDocs; i++) { - client().prepareIndex(indexName).setSource("{}", XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + client().prepareIndex(indexName).setSource(source(Map.of()), XContentType.JSON).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); } // Flush twice to update the safe commit's local checkpoint assertThat(client().admin().indices().prepareFlush(indexName).setForce(true).execute().get().getFailedShards(), equalTo(0)); @@ -1189,19 +1229,24 @@ public void testHistoryRetention() throws Exception { assertThat(recoveryState.getTranslog().recoveredOperations(), greaterThan(0)); } - public void testDoNotInfinitelyWaitForMapping() { + public void testDoNotInfinitelyWaitForMapping() throws IOException { internalCluster().ensureAtLeastNumDataNodes(3); - createIndex("test", Settings.builder() - .put("index.analysis.analyzer.test_analyzer.type", "custom") - .put("index.analysis.analyzer.test_analyzer.tokenizer", "standard") - 
.putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") - .put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); - client().admin().indices().preparePutMapping("test") - .setSource("test_field", "type=text,analyzer=test_analyzer").get(); + assertAcked( + prepareCreate("test").setSettings( + Settings.builder() + .put("index.analysis.analyzer.test_analyzer.type", "custom") + .put("index.analysis.analyzer.test_analyzer.tokenizer", "standard") + .putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 1) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); + client().admin().indices().preparePutMapping("test").setSource("test_field", "type=text,analyzer=test_analyzer").get(); int numDocs = between(1, 10); for (int i = 0; i < numDocs; i++) { client().prepareIndex("test").setId("u" + i) - .setSource(singletonMap("test_field", Integer.toString(i)), XContentType.JSON).get(); + .setSource(source(singletonMap("test_field", Integer.toString(i))), XContentType.JSON).get(); } Semaphore recoveryBlocked = new Semaphore(1); for (DiscoveryNode node : clusterService().state().nodes()) { @@ -1230,11 +1275,15 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { String indexName = "test"; internalCluster().startNodes(2); String nodeWithPrimary = internalCluster().startDataOnlyNode(); - assertAcked(client().admin().indices().prepareCreate(indexName) - .setSettings(Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.routing.allocation.include._name", nodeWithPrimary))); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.routing.allocation.include._name", 
nodeWithPrimary) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); MockTransportService transport = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary); CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); CountDownLatch allowToCompletePhase1Latch = new CountDownLatch(1); @@ -1274,17 +1323,21 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { List nodes = randomSubsetOf(2, StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false) .map(node -> node.value.getName()).collect(Collectors.toSet())); String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - // disable global checkpoint background sync so we can verify the start recovery request - .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "12h") - .put("index.routing.allocation.include._name", String.join(",", nodes)) - .build()); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + // disable global checkpoint background sync so we can verify the start recovery request + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "12h") + .put("index.routing.allocation.include._name", String.join(",", nodes)) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); ensureGreen(indexName); int numDocs = randomIntBetween(0, 100); indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); client().admin().indices().prepareRefresh(indexName).get(); // avoid refresh when we are 
failing a shard String failingNode = randomFrom(nodes); PlainActionFuture startRecoveryRequestFuture = new PlainActionFuture<>(); @@ -1356,14 +1409,19 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception internalCluster().ensureAtLeastNumDataNodes(2); String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") - .build()); - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, between(0, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); ensureGreen(indexName); final ShardId shardId = new ShardId(resolveIndex(indexName), 0); @@ -1402,14 +1460,19 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t internalCluster().ensureAtLeastNumDataNodes(2); String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - 
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") - .build()); - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, between(0, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); ensureGreen(indexName); final ShardId shardId = new ShardId(resolveIndex(indexName), 0); @@ -1428,8 +1491,8 @@ public Settings onNodeStopped(String nodeName) throws Exception { .setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1)) .setWaitForEvents(Priority.LANGUID).get().isTimedOut()); - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(1, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, between(1, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); // We do not guarantee that the replica can recover locally all the way to its own global checkpoint before starting // to recover from the primary, so we must be careful not to perform an operations-based recovery if this would require @@ -1458,7 +1521,8 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms"); + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()); final double reasonableOperationsBasedRecoveryProportion; if (randomBoolean()) { @@ -1471,9 +1535,9 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl } logger.info("--> performing ops-based recoveries up to [{}%] of docs", reasonableOperationsBasedRecoveryProportion * 100.0); - createIndex(indexName, settings.build()); + assertAcked(prepareCreate(indexName).setSettings(settings).setMapping(minimalMapping())); indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); ensureGreen(indexName); flush(indexName); @@ -1531,8 +1595,8 @@ public Settings onNodeStopped(String nodeName) throws Exception { * ==> it is unreasonable to recover the replica using a seqno-based recovery */ - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, newDocCount) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, newDocCount) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); flush(indexName); @@ -1556,12 +1620,18 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); String indexName = "test-index"; - createIndex(indexName, Settings.builder() - 
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build()); - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, between(0, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); final ShardId shardId = new ShardId(resolveIndex(indexName), 0); final DiscoveryNodes discoveryNodes = clusterService().state().nodes(); @@ -1573,11 +1643,15 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { assertBusy(() -> assertThat(primary.getLastSyncedGlobalCheckpoint(), equalTo(maxSeqNoBeforeRecovery))); assertThat(client().admin().indices().prepareFlush(indexName).get().getFailedShards(), is(0)); // makes a safe commit - indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(0, 100)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + indexRandom(randomBoolean(), dummyDocuments(), randomBoolean(), IntStream.range(0, between(0, 100)) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))); + assertAcked( + client().admin() + .indices() + 
.prepareUpdateSettings(indexName) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); ensureGreen(indexName); final long maxSeqNoAfterRecovery = primary.seqNoStats().getMaxSeqNo(); @@ -1612,13 +1686,17 @@ public void testRepeatedRecovery() throws Exception { // node that held it previously, in case that node hasn't completely cleared it up. final String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 6)) - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "200ms") - .build()); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 6)) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "200ms") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ).setMapping(minimalMapping()) + ); indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); assertThat(client().admin().indices().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); @@ -1638,7 +1716,7 @@ public void testRepeatedRecovery() throws Exception { logger.info("--> index more documents"); indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, randomIntBetween(0, 10)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("num", n)))).collect(toList())); logger.info("--> add replicas again"); assertAcked(client().admin().indices().prepareUpdateSettings(indexName) @@ -1651,13 +1729,23 @@ 
public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { final List dataNodes = internalCluster().startDataOnlyNodes(2); final Settings randomNodeDataPathSettings = internalCluster().dataPathSettings(randomFrom(dataNodes)); final String indexName = "test"; - assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1) - .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), randomBoolean())).get()); + assertAcked( + client().admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), randomBoolean()) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ) + .setMapping(minimalMapping()) + ); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("foo", "bar")))) .collect(Collectors.toList()); - indexRandom(randomBoolean(), true, true, indexRequests); + indexRandom(randomBoolean(), dummyDocuments(), true, indexRequests); ensureGreen(); internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); @@ -1677,9 +1765,16 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { internalCluster().startNode(); List dataNodes = internalCluster().startDataOnlyNodes(2); String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1) - .put("index.routing.allocation.include._name", String.join(",", dataNodes)).build()); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + 
.put("index.routing.allocation.include._name", String.join(",", dataNodes)) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); ensureGreen(indexName); ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); DiscoveryNode nodeWithOldPrimary = clusterState.nodes().get(clusterState.routingTable() @@ -1706,7 +1801,7 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { while (stopped.get() == false) { try { IndexResponse response = client().prepareIndex(indexName) - .setSource(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON).get(); + .setSource(source(Map.of("f" + randomIntBetween(1, 10), randomNonNegativeLong())), XContentType.JSON).get(); assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); } catch (ElasticsearchException ignored) { } @@ -1727,9 +1822,18 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { public void testCancelRecoveryWithAutoExpandReplicas() throws Exception { internalCluster().startMasterOnlyNode(); - assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all")) - .setWaitForActiveShards(ActiveShardCount.NONE)); + assertAcked( + client().admin() + .indices() + .prepareCreate("test") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all") + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + ) + .setMapping(minimalMapping()) + .setWaitForActiveShards(ActiveShardCount.NONE) + ); internalCluster().startNode(); internalCluster().startNode(); client().admin().cluster().prepareReroute().setRetryFailed(true).get(); @@ -1745,14 +1849,21 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { internalCluster().startNode(); List dataNodes = internalCluster().startDataOnlyNodes(2); String indexName = "test-index"; - 
createIndex(indexName, Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 0) - .put("index.routing.allocation.include._name", String.join(",", dataNodes)).build()); + assertAcked( + prepareCreate(indexName).setSettings( + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.routing.allocation.include._name", String.join(",", dataNodes)) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .build() + ).setMapping(minimalMapping()) + ); ensureGreen(indexName); final List indexRequests = IntStream.range(0, between(10, 500)) - .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .mapToObj(n -> client().prepareIndex(indexName).setSource(source(Map.of("foo", "bar")))) .collect(Collectors.toList()); - indexRandom(randomBoolean(), true, true, indexRequests); + indexRandom(randomBoolean(), dummyDocuments(), true, indexRequests); assertThat(client().admin().indices().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); @@ -1785,8 +1896,9 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { connection.sendRequest(requestId, action, request, options); }); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put("index.number_of_replicas", 1))); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.number_of_replicas", 1)) + ); ensureGreen(); assertTrue(fileInfoIntercepted.get()); assertTrue(fileChunkIntercepted.get()); @@ -1795,4 +1907,33 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .mapToLong(n -> n.getIndices().getStore().getReservedSize().getBytes()).sum(), equalTo(0L)); } + protected boolean inTimeSeriesMode() { + return false; + } + + /** + * The smallest acceptable 
mapping to create the index and index documents into it. + */ + protected String minimalMapping() throws IOException { + return "{}"; + } + + /** + * Convert a document's {@code _source} into one compatible with + * the {@link #minimalMapping()}. + */ + protected Map source(Map source) { + return source; + } + + /** + * Should we add dummy documents when running {@link ESIntegTestCase#indexRandom}? + */ + protected boolean dummyDocuments() { + return randomBoolean(); + } + + protected BackgroundIndexer backgroundIndexer(int numOfDocs) { + return new BackgroundIndexer(INDEX_NAME, "_doc", client(), numOfDocs); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java new file mode 100644 index 0000000000000..1af15deb7bb78 --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.ESIntegTestCase.Scope; + +import java.io.IOException; +import java.util.LinkedHashMap; +import java.util.Map; + +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class IndexRecoveryInTimeSeriesModeIT extends IndexRecoveryIT { + @Override + protected boolean inTimeSeriesMode() { + return true; + } + + @Override + protected String minimalMapping() throws IOException { + XContentBuilder b = JsonXContent.contentBuilder().startObject().startObject("properties"); + b.startObject("@timestamp").field("type", "date").endObject(); + b.startObject("dim").field("type", "keyword").field("dimension", true).endObject(); + return Strings.toString(b.endObject().endObject()); + } + + @Override + protected Map source(Map source) { + Map compatibleSource = new LinkedHashMap<>(source); + compatibleSource.put("@timestamp", "2022-01-01T00:00:00Z"); + compatibleSource.put("dim", "dimval"); + return compatibleSource; + } + + @Override + protected boolean dummyDocuments() { + return false; + } + + @Override + protected BackgroundIndexer backgroundIndexer(int numOfDocs) { + return new BackgroundIndexer(INDEX_NAME, "_doc", client(), numOfDocs) { + @Override + protected void extraSource(XContentBuilder builder) throws IOException { + builder.field("@timestamp", "2022-01-01T00:00:00Z"); + builder.field("dim", "dimval"); + } + }; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e3b38e331f77d..94b737d6942fb 100644 --- 
a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -36,15 +36,16 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; @@ -274,8 +275,15 @@ static boolean executeBulkItemRequest(BulkPrimaryExecutionContext context, Updat request.ifSeqNo(), request.ifPrimaryTerm()); } else { final IndexRequest request = context.getRequestToExecute(); - final SourceToParse sourceToParse = new SourceToParse(request.index(), request.id(), request.source(), - request.getContentType(), request.routing(), request.getDynamicTemplates()); + final SourceToParse sourceToParse = new SourceToParse( + request.index(), + request.id(), + request.source(), + request.getContentType(), + routing(request, primary.indexSettings()), + timeSeriesId(request, primary.indexSettings()), + request.getDynamicTemplates() + ); result = primary.applyIndexOperationOnPrimary(version, request.versionType(), sourceToParse, request.ifSeqNo(), request.ifPrimaryTerm(), request.getAutoGeneratedTimestamp(), request.isRetry()); } @@ -473,8 +481,15 @@ 
private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse case INDEX: final IndexRequest indexRequest = (IndexRequest) docWriteRequest; final ShardId shardId = replica.shardId(); - final SourceToParse sourceToParse = new SourceToParse(shardId.getIndexName(), indexRequest.id(), indexRequest.source(), - indexRequest.getContentType(), indexRequest.routing(), Map.of()); + final SourceToParse sourceToParse = new SourceToParse( + shardId.getIndexName(), + indexRequest.id(), + indexRequest.source(), + indexRequest.getContentType(), + routing(indexRequest, replica.indexSettings()), + timeSeriesId(indexRequest, replica.indexSettings()), + Map.of() + ); result = replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getPrimaryTerm(), primaryResponse.getVersion(), indexRequest.getAutoGeneratedTimestamp(), indexRequest.isRetry(), sourceToParse); break; @@ -501,4 +516,14 @@ private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse } return result; } + + private static String routing(IndexRequest request, IndexSettings settings) { + // TODO remove when tsid is a native field on IndexRequest + return settings.inTimeSeriesMode() ? null : request.routing(); + } + + private static BytesReference timeSeriesId(IndexRequest request, IndexSettings settings) { + // TODO remove when tsid is a native field on IndexRequest + return settings.inTimeSeriesMode() ? 
request.timeSeriesId() : null; + } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b71f337dd8c9b..d269c0cc881d5 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -30,12 +30,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.TimeSeriesIdGenerator; @@ -282,6 +279,14 @@ public String routing() { return this.routing; } + public BytesReference timeSeriesId() { + // TODO move time series out of routing + if (routing == null) { + throw new IllegalStateException("expected _tsid in _routing"); + } + return new BytesArray(Base64.getDecoder().decode(routing)); + } + /** * Sets the ingest pipeline to be executed before indexing the document */ @@ -649,20 +654,8 @@ private String routingFromTimeSeries( return null; } TimeSeriesIdGenerator gen = timeSeriesGeneratorLookup.apply(abstraction.getWriteIndex()); - if (gen == null) { - return null; - } - try { - try ( - XContentParser parser = contentType.xContent() - .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, source.streamInput()) - ) { - // TODO switch to native BytesRef over the wire - return Base64.getEncoder().encodeToString(BytesReference.toBytes(gen.generate(parser))); - } - } 
catch (IOException | IllegalArgumentException e) { - throw new IllegalArgumentException("Error building time series id: " + e.getMessage(), e); - } + // TODO switch to native BytesRef over the wire + return gen == null ? null : Base64.getEncoder().encodeToString(BytesReference.toBytes(gen.generate(source, contentType))); } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 49c555f1cb556..4e2c4d01a9d0a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -453,6 +453,8 @@ private IndexMetadata( this.isSystem = isSystem; this.timestampRange = timestampRange; this.timeSeriesMode = inTimeSeriesMode; + assert false == (timeSeriesMode + && isRoutingPartitionedIndex()) : "time series indices incompatible with routing partitioned indices"; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 3a45cb2442c28..c70a9bcb4ce9a 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -28,7 +28,9 @@ import org.elasticsearch.node.Node; import java.util.Collections; +import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; @@ -341,7 +343,36 @@ public static boolean isTimeSeriesModeEnabled() { * is automatically {@link TimeSeriesIdGenerator generated} using * the fields marked as "dimensions". 
*/ - public static final Setting TIME_SERIES_MODE = Setting.boolSetting("index.time_series_mode", false, Property.IndexScope); + public static final Setting TIME_SERIES_MODE = Setting.boolSetting( // TODO make it "mode" and force the default to "standard" + "index.time_series_mode", + false, + new Setting.Validator() { + @Override + public void validate(Boolean value) {} + + @Override + public void validate(Boolean value, Map, Object> settings) { + if (false == value) { + return; + } + if (settings.get(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING) != Integer.valueOf(1)) { + throw new IllegalArgumentException( + "[" + + TIME_SERIES_MODE.getKey() + + "] is incompatible with [" + + IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.getKey() + + "]" + ); + } + } + + public Iterator> settings() { + List> dependencies = List.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING); + return dependencies.iterator(); + } + }, + Property.IndexScope + ); private final Index index; private final Version version; diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java index 3613455f5929f..d94686e7b384a 100644 --- a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java +++ b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -16,9 +16,13 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Nullable; import java.io.IOException; import 
java.util.ArrayList; @@ -113,6 +117,15 @@ * it can be included in the routing. */ public final class TimeSeriesIdGenerator { + public static TimeSeriesIdGenerator build(@Nullable ObjectComponent root) { + return root == null ? EMPTY : new TimeSeriesIdGenerator(root); + } + + /** + * A generator without any dimensions. + */ + public static final TimeSeriesIdGenerator EMPTY = new TimeSeriesIdGenerator(new ObjectComponent(Map.of())); + /** * The maximum length of the tsid. The value itself comes from a range check in * Lucene's writer for utf-8 doc values. @@ -133,7 +146,7 @@ public final class TimeSeriesIdGenerator { private final ObjectComponent root; - public TimeSeriesIdGenerator(ObjectComponent root) { + private TimeSeriesIdGenerator(ObjectComponent root) { if (root == null) { /* * This can happen if an index is configured in time series mode @@ -159,6 +172,23 @@ public String toString() { return "extract dimensions using " + root; } + /** + * Build the tsid from the {@code _source}. See class docs for more on what it looks like and why. + */ + public BytesReference generate(BytesReference source, XContentType xContentType) { + try { + try ( + XContentParser parser = xContentType.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, source.streamInput()) + ) { + // TODO switch to native BytesRef over the wire + return generate(parser); + } + } catch (IOException | IllegalArgumentException e) { + throw new IllegalArgumentException("Error building time series id: " + e.getMessage(), e); + } + } + + /** + * Build the tsid from the {@code _source}. See class docs for more on what it looks like and why.
*/ diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index c3a0cf38d1d22..fab93567b830c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -307,6 +307,18 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException { } // TODO: pass the latest timestamp from engine. final long autoGeneratedIdTimestamp = -1; + /* + * In an ideal world we'd read the _tsid here and dump it into + * the Translog.Index. But there isn't a space for it right now + * because Translog.Index is *exactly* the same thing that we + * fsync on every write operation. And we don't want to fsync + * the _tsid on every write operation. So we don't add it here. + * If we see _tsid regeneration taking a significant chunk of + * time in peer recovery or cross cluster replication then we + * have the option to read the _tsid here and attach it - so + * long as we don't add bytes to the Translog.Index that we + * fsync.
+ */ op = new Translog.Index(id, seqNo, primaryTerm, version, source.toBytesRef().bytes, fields.routing(), autoGeneratedIdTimestamp); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java index d14af5a264d37..0ce1219a72cda 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java @@ -190,8 +190,16 @@ private LeafReader getDelegate() { private LeafReader createInMemoryLeafReader() { assert Thread.holdsLock(this); - final ParsedDocument parsedDocs = documentParser.parseDocument(new SourceToParse(shardId.getIndexName(), operation.id(), - operation.source(), XContentHelper.xContentType(operation.source()), operation.routing(), Map.of()), mappingLookup); + final SourceToParse sourceToParse = SourceToParse.parseTimeSeriesIdFromSource( + shardId.getIndexName(), + operation.id(), + operation.source(), + XContentHelper.xContentType(operation.source()), + operation.routing(), + Map.of(), + mappingLookup + ); + final ParsedDocument parsedDocs = documentParser.parseDocument(sourceToParse, mappingLookup); parsedDocs.updateSeqID(operation.seqNo(), operation.primaryTerm()); parsedDocs.version().setLongValue(operation.version()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 65995b036d533..b2db42b9da081 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -10,14 +10,12 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.TimeSeriesIdGenerator; public class DocumentMapper { private final String type; private final 
CompressedXContent mappingSource; private final MappingLookup mappingLookup; private final DocumentParser documentParser; - private final TimeSeriesIdGenerator timeSeriesIdGenerator; /** * Create a new {@link DocumentMapper} that holds empty mappings. @@ -27,16 +25,15 @@ public class DocumentMapper { public static DocumentMapper createEmpty(MapperService mapperService) { RootObjectMapper root = new RootObjectMapper.Builder(MapperService.SINGLE_MAPPING_NAME).build(new ContentPath(1)); MetadataFieldMapper[] metadata = mapperService.getMetadataMappers().values().toArray(new MetadataFieldMapper[0]); - Mapping mapping = new Mapping(root, metadata, null); + Mapping mapping = new Mapping(root, metadata, null, mapperService.getIndexSettings().inTimeSeriesMode()); return new DocumentMapper(mapperService.documentParser(), mapping, mapperService.getIndexSettings().inTimeSeriesMode()); } DocumentMapper(DocumentParser documentParser, Mapping mapping, boolean inTimeSeriesMode) { this.documentParser = documentParser; this.type = mapping.getRoot().name(); - this.mappingLookup = MappingLookup.fromMapping(mapping); + this.mappingLookup = MappingLookup.fromMapping(mapping, inTimeSeriesMode); this.mappingSource = mapping.toCompressedXContent(); - timeSeriesIdGenerator = inTimeSeriesMode ? 
mapping.buildTimeSeriesIdGenerator() : null; } public Mapping mapping() { @@ -71,10 +68,6 @@ public IndexFieldMapper IndexFieldMapper() { return metadataMapper(IndexFieldMapper.class); } - public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { - return timeSeriesIdGenerator; - } - public MappingLookup mappers() { return this.mappingLookup; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 0896ddfe6a2e0..033761f78c58a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -120,8 +120,13 @@ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, Map metadataMapperParsers = mapperRegistry.getMetadataMapperParsers(indexSettings.getIndexVersionCreated()); this.parserContextSupplier = () -> parserContextFunction.apply(null); - this.mappingParser = new MappingParser(parserContextSupplier, metadataMapperParsers, - this::getMetadataMappers, this::resolveDocumentType); + this.mappingParser = new MappingParser( + parserContextSupplier, + metadataMapperParsers, + this::getMetadataMappers, + this::resolveDocumentType, + indexSettings.inTimeSeriesMode() + ); } public boolean hasNested() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index e8b4387f36c9f..62f0ada9274d5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -36,15 +36,21 @@ public final class Mapping implements ToXContentFragment { public static final Mapping EMPTY = new Mapping( - new RootObjectMapper.Builder("_doc").build(new ContentPath()), new MetadataFieldMapper[0], null); + new RootObjectMapper.Builder("_doc").build(new ContentPath()), new MetadataFieldMapper[0], null, 
false); private final RootObjectMapper root; private final Map meta; private final MetadataFieldMapper[] metadataMappers; private final Map, MetadataFieldMapper> metadataMappersMap; private final Map metadataMappersByName; - - public Mapping(RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map meta) { + private final TimeSeriesIdGenerator timeSeriesIdGenerator; + + public Mapping( + RootObjectMapper rootObjectMapper, + MetadataFieldMapper[] metadataMappers, + Map meta, + boolean inTimeSeriesMode + ) { this.metadataMappers = metadataMappers; Map, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); Map metadataMappersByName = new HashMap<>(); @@ -63,7 +69,9 @@ public int compare(Mapper o1, Mapper o2) { this.metadataMappersMap = unmodifiableMap(metadataMappersMap); this.metadataMappersByName = unmodifiableMap(metadataMappersByName); this.meta = meta; - + this.timeSeriesIdGenerator = inTimeSeriesMode + ? TimeSeriesIdGenerator.build(root.selectTimeSeriesIdComponents()) + : null; } /** @@ -121,7 +129,7 @@ void validate(MappingLookup mappers) { * Generate a mapping update for the given root object mapper. 
*/ Mapping mappingUpdate(RootObjectMapper rootObjectMapper) { - return new Mapping(rootObjectMapper, metadataMappers, meta); + return new Mapping(rootObjectMapper, metadataMappers, meta, inTimeSeriesMode()); } /** @@ -162,7 +170,7 @@ Mapping merge(Mapping mergeWith, MergeReason reason) { XContentHelper.mergeDefaults(mergedMeta, meta); } - return new Mapping(mergedRoot, mergedMetadataMappers.values().toArray(new MetadataFieldMapper[0]), mergedMeta); + return new Mapping(mergedRoot, mergedMetadataMappers.values().toArray(new MetadataFieldMapper[0]), mergedMeta, inTimeSeriesMode()); } @Override @@ -190,7 +198,11 @@ public String toString() { } } - TimeSeriesIdGenerator buildTimeSeriesIdGenerator() { - return new TimeSeriesIdGenerator(root.selectTimeSeriesIdComponents()); + public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { + return timeSeriesIdGenerator; + } + + private boolean inTimeSeriesMode() { + return timeSeriesIdGenerator != null; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 61594fa0144da..e78e911730dd9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -58,7 +58,7 @@ private CacheKey() {} * @param mapping the mapping source * @return the newly created lookup instance */ - public static MappingLookup fromMapping(Mapping mapping) { + public static MappingLookup fromMapping(Mapping mapping, boolean inTimeSeriesMode) { List newObjectMappers = new ArrayList<>(); List newFieldMappers = new ArrayList<>(); List newFieldAliasMappers = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index d04b97f97738d..155bba74f3662 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -30,15 +30,18 @@ public final class MappingParser { private final Supplier, MetadataFieldMapper>> metadataMappersSupplier; private final Map metadataMapperParsers; private final Function documentTypeResolver; + private final boolean inTimeSeriesMode; MappingParser(Supplier parserContextSupplier, Map metadataMapperParsers, Supplier, MetadataFieldMapper>> metadataMappersSupplier, - Function documentTypeResolver) { + Function documentTypeResolver, + boolean inTimeSeriesMode) { this.parserContextSupplier = parserContextSupplier; this.metadataMapperParsers = metadataMapperParsers; this.metadataMappersSupplier = metadataMappersSupplier; this.documentTypeResolver = documentTypeResolver; + this.inTimeSeriesMode = inTimeSeriesMode; } /** @@ -145,6 +148,7 @@ private Mapping parse(String type, Map mapping) throws MapperPar return new Mapping( rootObjectMapper, metadataMappers.values().toArray(new MetadataFieldMapper[0]), - meta); + meta, + inTimeSeriesMode); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index c7dd591a0321e..528c797b3429b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -8,15 +8,49 @@ package org.elasticsearch.index.mapper; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.Nullable; import java.util.Map; import java.util.Objects; +import java.util.function.Function; public class SourceToParse { + public static Function parseTimeSeriesIdFromSource( + String index, + String id, + BytesReference source, + XContentType xContentType, + @Nullable String routing, + Map dynamicTemplates + ) 
{ + return documentMapper -> parseTimeSeriesIdFromSource( + index, + id, + source, + xContentType, + routing, + dynamicTemplates, + documentMapper.mappers() + ); + } + + public static SourceToParse parseTimeSeriesIdFromSource( + String index, + String id, + BytesReference source, + XContentType xContentType, + @Nullable String routing, + Map dynamicTemplates, + MappingLookup lookup + ) { + BytesReference timeSeriesId = lookup.getMapping().getTimeSeriesIdGenerator() == null + ? null + : lookup.getMapping().getTimeSeriesIdGenerator().generate(source, xContentType); + return new SourceToParse(index, id, source, xContentType, routing, timeSeriesId, dynamicTemplates); + } private final BytesReference source; @@ -26,12 +60,19 @@ public class SourceToParse { private final @Nullable String routing; + private final @Nullable BytesReference timeSeriesId; + private final XContentType xContentType; private final Map dynamicTemplates; public SourceToParse(String index, String id, BytesReference source, XContentType xContentType, @Nullable String routing, - Map dynamicTemplates) { + @Nullable BytesReference timeSeriesId, Map dynamicTemplates) { + if (routing != null && timeSeriesId != null) { + throw new IllegalArgumentException( + "only one of routing or timeSeriesId are supported but got [" + routing + "] and " + timeSeriesId.toBytesRef() + ); + } this.index = Objects.requireNonNull(index); this.id = Objects.requireNonNull(id); // we always convert back to byte array, since we store it and Field only supports bytes.. 
@@ -39,11 +80,12 @@ public SourceToParse(String index, String id, BytesReference source, XContentTyp this.source = new BytesArray(Objects.requireNonNull(source).toBytesRef()); this.xContentType = Objects.requireNonNull(xContentType); this.routing = routing; + this.timeSeriesId = timeSeriesId; this.dynamicTemplates = Objects.requireNonNull(dynamicTemplates); } public SourceToParse(String index, String id, BytesReference source, XContentType xContentType) { - this(index, id, source, xContentType, null, Map.of()); + this(index, id, source, xContentType, null, null, Map.of()); } public BytesReference source() { @@ -62,6 +104,10 @@ public String id() { return this.routing; } + public @Nullable BytesReference timeSeriesId() { + return this.timeSeriesId; + } + /** * Returns a map from the full path (i.e. foo.bar) of field names to the names of dynamic mapping templates. */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index 028e017455029..9aba859e40629 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -10,7 +10,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.Query; -import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; @@ -21,7 +21,6 @@ import java.io.IOException; import java.time.ZoneId; -import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.function.Supplier; @@ -105,13 +104,12 @@ public void preParse(DocumentParserContext context) throws IOException { } assert fieldType().isSearchable() == false; - 
String routing = context.sourceToParse().routing(); - if (routing == null) { + BytesReference timeSeriesId = context.sourceToParse().timeSeriesId(); + if (timeSeriesId == null) { throw new IllegalArgumentException("In time series mode the tsid need to be in the routing"); } // TODO switch to native BytesRef over the wire, leaving the routing alone - BytesRef value = new BytesRef(Base64.getDecoder().decode(routing)); - context.doc().add(new SortedSetDocValuesField(fieldType().name(), value)); + context.doc().add(new SortedSetDocValuesField(fieldType().name(), timeSeriesId.toBytesRef())); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 71ea3f1b103d4..a7d99d1165d15 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -828,26 +828,26 @@ public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType throws IOException { assert versionType.validateVersionForWrites(version); return applyIndexOperation(getEngine(), UNASSIGNED_SEQ_NO, getOperationPrimaryTerm(), version, versionType, ifSeqNo, - ifPrimaryTerm, autoGeneratedTimestamp, isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse); + ifPrimaryTerm, autoGeneratedTimestamp, isRetry, Engine.Operation.Origin.PRIMARY, documentMapper -> sourceToParse); } public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long opPrimaryTerm, long version, long autoGeneratedTimeStamp, boolean isRetry, SourceToParse sourceToParse) throws IOException { return applyIndexOperation(getEngine(), seqNo, opPrimaryTerm, version, null, UNASSIGNED_SEQ_NO, 0, - autoGeneratedTimeStamp, isRetry, Engine.Operation.Origin.REPLICA, sourceToParse); + autoGeneratedTimeStamp, isRetry, Engine.Operation.Origin.REPLICA, documentMapper -> sourceToParse); } private Engine.IndexResult applyIndexOperation(Engine 
engine, long seqNo, long opPrimaryTerm, long version, @Nullable VersionType versionType, long ifSeqNo, long ifPrimaryTerm, long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin, - SourceToParse sourceToParse) throws IOException { + Function sourceSource) throws IOException { assert opPrimaryTerm <= getOperationPrimaryTerm() : "op term [ " + opPrimaryTerm + " ] > shard term [" + getOperationPrimaryTerm() + "]"; ensureWriteAllowed(origin); Engine.Index operation; try { - operation = prepareIndex(mapperService, sourceToParse, + operation = prepareIndex(mapperService, sourceSource, seqNo, opPrimaryTerm, version, versionType, origin, autoGeneratedTimeStamp, isRetry, ifSeqNo, ifPrimaryTerm); Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); if (update != null) { @@ -865,19 +865,20 @@ private Engine.IndexResult applyIndexOperation(Engine engine, long seqNo, long o return index(engine, operation); } - public static Engine.Index prepareIndex(MapperService mapperService, SourceToParse source, long seqNo, + public static Engine.Index prepareIndex(MapperService mapperService, Function sourceSource, long seqNo, long primaryTerm, long version, VersionType versionType, Engine.Operation.Origin origin, long autoGeneratedIdTimestamp, boolean isRetry, long ifSeqNo, long ifPrimaryTerm) { long startTime = System.nanoTime(); - assert source.dynamicTemplates().isEmpty() || origin == Engine.Operation.Origin.PRIMARY : - "dynamic_templates parameter can only be associated with primary operations"; DocumentMapper documentMapper = mapperService.documentMapper(); Mapping mapping = null; if (documentMapper == null) { documentMapper = DocumentMapper.createEmpty(mapperService); mapping = documentMapper.mapping(); } + SourceToParse source = sourceSource.apply(documentMapper); + assert source.dynamicTemplates().isEmpty() || origin == Engine.Operation.Origin.PRIMARY : + "dynamic_templates parameter can only be associated with primary operations"; ParsedDocument 
doc = documentMapper.parse(source); if (mapping != null) { //If we are indexing but there is no mapping we create one. This is to ensure that whenever at least a document is indexed @@ -1576,12 +1577,19 @@ private Engine.Result applyTranslogOperation(Engine engine, Translog.Operation o switch (operation.opType()) { case INDEX: final Translog.Index index = (Translog.Index) operation; + final Function sourceSource = SourceToParse.parseTimeSeriesIdFromSource( + shardId.getIndexName(), + index.id(), + index.source(), + XContentHelper.xContentType(index.source()), + index.routing(), + Map.of() + ); // we set canHaveDuplicates to true all the time such that we de-optimze the translog case and ensure that all // autoGeneratedID docs that are coming from the primary are updated correctly. result = applyIndexOperation(engine, index.seqNo(), index.primaryTerm(), index.version(), versionType, UNASSIGNED_SEQ_NO, 0, index.getAutoGeneratedIdTimestamp(), true, origin, - new SourceToParse(shardId.getIndexName(), index.id(), index.source(), - XContentHelper.xContentType(index.source()), index.routing(), Map.of())); + sourceSource); break; case DELETE: final Translog.Delete delete = (Translog.Delete) operation; diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 4afa98edc7f30..9a4e2af9dcfb8 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import 
org.elasticsearch.common.regex.Regex; @@ -282,10 +283,13 @@ private static Fields generateTermVectors(IndexShard indexShard, } private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException { + MappingLookup mappingLookup = indexShard.mapperService().mappingLookup(); + BytesReference timeSeriesId = indexShard.mapperService().getIndexSettings().inTimeSeriesMode() + ? mappingLookup.getMapping().getTimeSeriesIdGenerator().generate(request.doc(), request.xContentType()) + : null; SourceToParse source = new SourceToParse(indexShard.shardId().getIndexName(), "_id_for_tv_api", request.doc(), - request.xContentType(), request.routing(), Map.of()); + request.xContentType(), request.routing(), timeSeriesId, Map.of()); DocumentParser documentParser = indexShard.mapperService().documentParser(); - MappingLookup mappingLookup = indexShard.mapperService().mappingLookup(); ParsedDocument parsedDocument = documentParser.parseDocument(source, mappingLookup); // select the right fields and generate term vectors LuceneDocument doc = parsedDocument.rootDoc(); diff --git a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java index 3a9aa9e1673d8..8f6134d1d644c 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java @@ -89,7 +89,7 @@ public long metadataVersion() { @Override public TimeSeriesIdGenerator generator() { - return local.mapperService().documentMapper().getTimeSeriesIdGenerator(); + return local.mapperService().mappingLookup().getMapping().getTimeSeriesIdGenerator(); } }; }; @@ -99,7 +99,7 @@ public TimeSeriesIdGenerator generator() { try { try (MapperService tmp = indicesService.createIndexMapperService(indexMetadata)) { tmp.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY); 
- TimeSeriesIdGenerator gen = tmp.documentMapper().getTimeSeriesIdGenerator(); + TimeSeriesIdGenerator gen = tmp.mappingLookup().getMapping().getTimeSeriesIdGenerator(); logger.trace("computed timeseries id generator for {}", indexMetadata.getIndex()); return gen; } @@ -146,7 +146,7 @@ public TimeSeriesIdGenerator apply(IndexMetadata meta) { */ if (meta.getMappingVersion() > v.mappingVersion) { throw new IllegalStateException( - "Got a newer version fo the index than the time series id generator [" + "Got a newer version of the index than the time series id generator [" + meta.getMappingVersion() + "] vs [" + v.mappingVersion @@ -185,6 +185,12 @@ void applyClusterState(Metadata metadata) { continue; } Index index = indexMetadata.getIndex(); + + if (indexMetadata.mapping() == null) { + byIndex.put(index, new PreBuiltValue(indexMetadata.getMappingVersion(), TimeSeriesIdGenerator.EMPTY)); + continue; + } + DedupeKey key = new DedupeKey(indexMetadata); /* diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index c2bf804d21b10..c71e26e8d9ef6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -581,7 +581,7 @@ protected String contentType() { RootObjectMapper.Builder root = new RootObjectMapper.Builder("_doc"); root.add(new DateFieldMapper.Builder(dataStream.getTimeStampField().getName(), DateFieldMapper.Resolution.MILLISECONDS, DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ScriptCompiler.NONE, true, Version.CURRENT)); - Mapping mapping = new Mapping(root.build(new ContentPath("")), metadataFieldMappers, Collections.emptyMap()); + Mapping mapping = new Mapping(root.build(new ContentPath("")), metadataFieldMappers, 
Collections.emptyMap(), randomBoolean()); MappingLookup mappingLookup = MappingLookup.fromMappers( mapping, List.of(mockedTimestampField, dateFieldMapper), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 2a764a84cd1d6..3c19b89b23702 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -224,16 +224,20 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { } public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { + boolean timeSeriesMode = randomBoolean(); BulkItemRequest[] items = new BulkItemRequest[1]; - DocWriteRequest writeRequest = new IndexRequest("index").id("id") - .source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + IndexRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); + if (timeSeriesMode) { + writeRequest.routing("tsid"); + } items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Engine.IndexResult mappingUpdate = - new Engine.IndexResult(new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())); + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), timeSeriesMode) + ); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation); @@ -242,6 +246,7 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) 
.thenReturn(mappingUpdate); when(shard.mapperService()).thenReturn(mock(MapperService.class)); + when(shard.indexSettings()).thenReturn(indexSettings(timeSeriesMode)); randomlySetIgnoredPrimaryResponse(items[0]); @@ -761,8 +766,9 @@ public void testRetries() throws Exception { Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0); - Engine.IndexResult mappingUpdate = - new Engine.IndexResult(new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())); + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), randomBoolean()) + ); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation); @@ -841,8 +847,10 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { items[1] = new BulkItemRequest(1, writeRequest2); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - Engine.IndexResult mappingUpdate = - new Engine.IndexResult(new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap())); + boolean timeSeriesMode = false; + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), timeSeriesMode) + ); Translog.Location resultLocation1 = new Translog.Location(42, 36, 36); Translog.Location resultLocation2 = new Translog.Location(42, 42, 42); Engine.IndexResult success1 = new FakeIndexResult(1, 1, 10, true, resultLocation1); @@ -854,6 +862,7 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { .thenReturn(success1, mappingUpdate, success2); when(shard.getFailedIndexResult(any(EsRejectedExecutionException.class), 
anyLong())).thenCallRealMethod(); when(shard.mapperService()).thenReturn(mock(MapperService.class)); + when(shard.indexSettings()).thenReturn(indexSettings(timeSeriesMode)); randomlySetIgnoredPrimaryResponse(items[0]); @@ -953,6 +962,13 @@ public void testPerformOnPrimaryReportsBulkStats() throws Exception { latch.await(); } + private IndexSettings indexSettings(boolean timeSeriesMode) throws IOException { + return new IndexSettings( + indexMetadata(), + Settings.builder().put(idxSettings).put(IndexSettings.TIME_SERIES_MODE.getKey(), timeSeriesMode).build() + ); + } + private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { if (randomBoolean()) { // add a response to the request and thereby check that it is ignored for the primary. diff --git a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java index d6149dee28627..5feafb3799487 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java @@ -144,7 +144,7 @@ public void testSendUpdateMappingUsingAutoPutMappingAction() { mua.setClient(client); RootObjectMapper rootObjectMapper = new RootObjectMapper.Builder("name").build(new ContentPath()); - Mapping update = new Mapping(rootObjectMapper, new MetadataFieldMapper[0], Map.of()); + Mapping update = new Mapping(rootObjectMapper, new MetadataFieldMapper[0], Map.of(), randomBoolean()); mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.wrap(() -> {})); verify(indicesAdminClient).execute(eq(AutoPutMappingAction.INSTANCE), any(), any()); diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java index 47d2344588f37..74e605f466a59 100644 --- 
a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java @@ -81,7 +81,7 @@ public void testKeywordNull() throws IOException { } private TimeSeriesIdGenerator keywordTimeSeriesIdGenerator() { - return new TimeSeriesIdGenerator( + return TimeSeriesIdGenerator.build( new ObjectComponent(Map.of("a", keywordComponent(), "o", new ObjectComponent(Map.of("e", keywordComponent())))) ); } @@ -243,7 +243,7 @@ public void testByteNull() throws IOException { } private TimeSeriesIdGenerator timeSeriedIdForNumberType(NumberType numberType) { - return new TimeSeriesIdGenerator( + return TimeSeriesIdGenerator.build( new ObjectComponent( Map.of( "a", @@ -280,7 +280,7 @@ public void testIpNull() throws IOException { } private TimeSeriesIdGenerator timeSeriedIdForIp() { - return new TimeSeriesIdGenerator( + return TimeSeriesIdGenerator.build( new ObjectComponent( Map.of( "a", @@ -303,7 +303,7 @@ public void testVeryLarge() { doc.put("d" + i, large); components.put("d" + i, keywordComponent()); } - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(new ObjectComponent(components)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(new ObjectComponent(components)); Exception e = expectThrows(IllegalArgumentException.class, () -> gen.generate(parser(doc))); assertThat(e.getMessage(), equalTo("tsid longer than [32766] bytes [55691]")); } @@ -313,7 +313,7 @@ public void testVeryLarge() { */ public void testSameGenConsistentForSameDoc() throws IOException { Map doc = randomDoc(between(1, 100), between(0, 2)); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(randomDimensionsFromDoc(doc))); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(randomDimensionsFromDoc(doc))); assertThat(gen.generate(parser(doc)), equalTo(gen.generate(parser(doc)))); } @@ -323,7 +323,7 @@ public void 
testSameGenConsistentForSameDoc() throws IOException { public void testExtraFieldsDoNotMatter() throws IOException { Map doc = randomDoc(between(1, 100), between(0, 2)); Map dimensions = randomDimensionsFromDoc(doc); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertThat(gen.generate(parser(dimensions)), equalTo(gen.generate(parser(doc)))); } @@ -333,7 +333,7 @@ public void testExtraFieldsDoNotMatter() throws IOException { public void testOrderDoesNotMatter() throws IOException { Map doc = randomDoc(between(1, 100), between(0, 2)); Map dimensions = randomDimensionsFromDoc(doc); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertThat(gen.generate(parser(shuffled(doc))), equalTo(gen.generate(parser(doc)))); } @@ -343,9 +343,9 @@ public void testOrderDoesNotMatter() throws IOException { public void testUnusedExtraDimensions() throws IOException { Map doc = randomDoc(between(1, 100), between(0, 2)); Map dimensions = randomDimensionsFromDoc(doc); - TimeSeriesIdGenerator small = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator small = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); dimensions.put(randomValueOtherThanMany(doc::containsKey, () -> randomAlphaOfLength(5)), randomAlphaOfLength(3)); - TimeSeriesIdGenerator large = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator large = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertThat(large.generate(parser(doc)), equalTo(small.generate(parser(doc)))); } @@ -357,14 +357,14 @@ public void testDifferentValues() throws IOException { Map orig = randomDoc(between(1, 100), between(0, 
2)); Map dimensions = randomDimensionsFromDoc(orig); Map modified = modifyDimensionValue(orig, dimensions); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertThat(gen.generate(parser(modified)), not(equalTo(gen.generate(parser(orig))))); } public void testParse() throws IOException { Map doc = randomDoc(between(1, 100), between(0, 2)); Map dimensions = randomDimensionsFromDoc(doc); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertMap(TimeSeriesIdGenerator.parse(gen.generate(parser(doc)).streamInput()), expectedParsedDimensions(dimensions)); assertMap(TimeSeriesIdGenerator.parse(gen.generate(parser(shuffled(doc))).streamInput()), expectedParsedDimensions(dimensions)); } @@ -410,9 +410,9 @@ private Map modifyDimensionValue(Map doc, Map dimens public void testDifferentDimensions() throws IOException { Map origDoc = randomDoc(between(1, 10), between(0, 2)); Map origDimensions = randomDimensionsFromDoc(origDoc); - TimeSeriesIdGenerator origGen = new TimeSeriesIdGenerator(objectComponentForDimensions(origDimensions)); + TimeSeriesIdGenerator origGen = TimeSeriesIdGenerator.build(objectComponentForDimensions(origDimensions)); Tuple, Map> modified = modifyDimensionName(origDoc, origDimensions); - TimeSeriesIdGenerator modGen = new TimeSeriesIdGenerator(objectComponentForDimensions(modified.v2())); + TimeSeriesIdGenerator modGen = TimeSeriesIdGenerator.build(objectComponentForDimensions(modified.v2())); assertThat(modGen.generate(parser(modified.v1())), not(equalTo(origGen.generate(parser(origDoc))))); } @@ -452,14 +452,14 @@ public void testFewerDimensions() throws IOException { Map orig = randomDoc(between(2, 100), between(0, 2)); Map dimensions = 
randomDimensionsFromDoc(orig, 2, 10); Map modified = removeDimension(orig, dimensions); - TimeSeriesIdGenerator gen = new TimeSeriesIdGenerator(objectComponentForDimensions(dimensions)); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); assertThat(gen.generate(parser(modified)), not(equalTo(gen.generate(parser(orig))))); } public void testEmpty() throws IOException { Exception e = expectThrows( IllegalArgumentException.class, - () -> new TimeSeriesIdGenerator(null).generate(parser(Map.of())).streamInput() + () -> TimeSeriesIdGenerator.build(null).generate(parser(Map.of())).streamInput() ); assertThat(e.getMessage(), equalTo("There aren't any mapped dimensions")); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index 63f5f9e7fe44b..abb877982ad82 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -337,7 +337,7 @@ public void testContainsTimeSeriesGenerator() throws IOException { mapping(b -> b.startObject("dim").field("type", "keyword").field("dimension", true).endObject()) ).documentMapper(); assertThat( - TimeSeriesIdGenerator.parse(documentMapper.getTimeSeriesIdGenerator() + TimeSeriesIdGenerator.parse(documentMapper.mapping().getTimeSeriesIdGenerator() .generate( new MapXContentParser( NamedXContentRegistry.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index efd862b95a77c..a31a9329bbcd7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -1060,13 +1060,13 @@ public void testWithDynamicTemplates() throws Exception { 
String field = randomFrom("loc", "foo.loc", "foo.bar.loc"); - ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "41.12,-71.34"), null, Map.of(field, "points"))); + ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "41.12,-71.34"), null, null, Map.of(field, "points"))); IndexableField[] fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(2)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); assertThat(fields[1].fieldType(), sameInstance(LatLonDocValuesField.TYPE)); - doc = mapper.parse(source("1", b -> b.field(field, new double[]{-71.34, 41.12}), null, Map.of(field, "points"))); + doc = mapper.parse(source("1", b -> b.field(field, new double[]{-71.34, 41.12}), null, null, Map.of(field, "points"))); fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(2)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); @@ -1077,13 +1077,15 @@ public void testWithDynamicTemplates() throws Exception { b.field("lat", "-71.34"); b.field("lon", 41.12); b.endObject(); - }, null, Map.of(field, "points"))); + }, null, null, Map.of(field, "points"))); fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(2)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); assertThat(fields[1].fieldType(), sameInstance(LatLonDocValuesField.TYPE)); - doc = mapper.parse(source("1", b -> b.field(field, new String[]{"41.12,-71.34", "43,-72.34"}), null, Map.of(field, "points"))); + doc = mapper.parse( + source("1", b -> b.field(field, new String[] { "41.12,-71.34", "43,-72.34" }), null, null, Map.of(field, "points")) + ); fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(4)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); @@ -1103,7 +1105,7 @@ public void testWithDynamicTemplates() throws Exception { b.field("lon", 41.12); b.endObject(); b.endArray(); - }, null, Map.of(field, "points"))); + }, null, null, Map.of(field, 
"points"))); fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(4)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); @@ -1115,7 +1117,7 @@ public void testWithDynamicTemplates() throws Exception { b.startObject("address"); b.field("home", "43,-72.34"); b.endObject(); - }, null, Map.of("address.home", "points"))); + }, null, null, Map.of("address.home", "points"))); fields = doc.rootDoc().getFields("address.home"); assertThat(fields, arrayWithSize(2)); assertThat(fields[0].fieldType(), sameInstance(LatLonPoint.TYPE)); @@ -1146,12 +1148,12 @@ public void testDynamicTemplatesNotFound() throws Exception { b.endArray(); })); String field = randomFrom("foo", "foo.bar", "foo.bar.baz"); - ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "true"), null, Map.of(field, "booleans"))); + ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "true"), null, null, Map.of(field, "booleans"))); IndexableField[] fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(1)); assertThat(fields[0].fieldType(), sameInstance(BooleanFieldMapper.Defaults.FIELD_TYPE)); MapperParsingException error = expectThrows(MapperParsingException.class, () -> - mapper.parse(source("1", b -> b.field(field, "hello"), null, Map.of(field, "foo_bar")))); + mapper.parse(source("1", b -> b.field(field, "hello"), null, null, Map.of(field, "foo_bar")))); assertThat(error.getMessage(), containsString("Can't find dynamic template for dynamic template name [foo_bar] of field [" + field + "]")); } @@ -1181,11 +1183,11 @@ public void testWrongTypeDynamicTemplate() throws Exception { })); String field = randomFrom("foo.bar", "foo.bar.baz"); MapperParsingException error = expectThrows(MapperParsingException.class, - () -> mapper.parse(source("1", b -> b.field(field, "true"), null, Map.of("foo", "booleans")))); + () -> mapper.parse(source("1", b -> b.field(field, "true"), null, null, Map.of("foo", "booleans")))); 
assertThat(error.getMessage(), containsString("Field [foo] must be an object; but it's configured as [boolean] in dynamic template [booleans]")); - ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "true"), null, Map.of(field, "booleans"))); + ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "true"), null, null, Map.of(field, "booleans"))); IndexableField[] fields = doc.rootDoc().getFields(field); assertThat(fields, arrayWithSize(1)); assertThat(fields[0].fieldType(), sameInstance(BooleanFieldMapper.Defaults.FIELD_TYPE)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index 545275c0fdb26..00af29caeee2e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -655,8 +655,18 @@ public void testTemplateWithoutMatchPredicates() throws Exception { } mapping.endObject(); MapperService mapperService = createMapperService(mapping); - ParsedDocument doc = mapperService.documentMapper().parse(new SourceToParse("test", "1", - new BytesArray("{\"foo\": \"41.12,-71.34\", \"bar\": \"41.12,-71.34\"}"), XContentType.JSON, null, Map.of("foo", "geo_point"))); + ParsedDocument doc = mapperService.documentMapper() + .parse( + new SourceToParse( + "test", + "1", + new BytesArray("{\"foo\": \"41.12,-71.34\", \"bar\": \"41.12,-71.34\"}"), + XContentType.JSON, + null, + null, + Map.of("foo", "geo_point") + ) + ); assertThat(doc.rootDoc().getFields("foo"), arrayWithSize(2)); assertThat(doc.rootDoc().getFields("bar"), arrayWithSize(1)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 4c64316b0ec13..3c899d64a62ca 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -198,7 +198,12 @@ private static MappingLookup createMappingLookup(List fieldMappers, RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); builder.setRuntime(runtimeFieldTypes); - Mapping mapping = new Mapping(builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap()); + Mapping mapping = new Mapping( + builder.build(new ContentPath()), + new MetadataFieldMapper[0], + Collections.emptyMap(), + randomBoolean() + ); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, fieldAliasMappers); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index 235fb719f3d9d..c30d5a9871b31 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -38,7 +38,12 @@ private static MappingLookup createMappingLookup(List fieldMappers, RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); builder.setRuntime(runtimeFieldTypes); - Mapping mapping = new Mapping(builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap()); + Mapping mapping = new Mapping( + builder.build(new ContentPath()), + new MetadataFieldMapper[0], + Collections.emptyMap(), + randomBoolean() + ); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, emptyList()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java 
b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index b706834c7089f..4786a03ec29e2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -45,7 +45,7 @@ private static MappingParser createMappingParser(Settings settings) { metadataMapperParsers.values().stream().map(parser -> parser.getDefault(parserContextSupplier.get())) .forEach(m -> metadataMappers.put(m.getClass(), m)); return new MappingParser(parserContextSupplier, metadataMapperParsers, - () -> metadataMappers, type -> MapperService.SINGLE_MAPPING_NAME); + () -> metadataMappers, type -> MapperService.SINGLE_MAPPING_NAME, randomBoolean()); } public void testFieldNameWithDots() throws Exception { @@ -76,7 +76,7 @@ public void testFieldNameWithDeepDots() throws Exception { b.endObject(); }); Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder))); - MappingLookup mappingLookup = MappingLookup.fromMapping(mapping); + MappingLookup mappingLookup = MappingLookup.fromMapping(mapping, randomBoolean()); assertNotNull(mappingLookup.getMapper("foo.bar")); assertNotNull(mappingLookup.getMapper("foo.baz.deep.field")); assertNotNull(mappingLookup.objectMappers().get("foo")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java index 9183cbbf1f7b7..5dd5a05f0c513 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RoutingFieldMapperTests.java @@ -38,13 +38,13 @@ public void testRoutingMapper() throws Exception { .startObject() .field("field", "value") .endObject()), - XContentType.JSON, "routing_value", Map.of())); + XContentType.JSON, "routing_value", null, Map.of())); 
assertThat(doc.rootDoc().get("_routing"), equalTo("routing_value")); assertThat(doc.rootDoc().get("field"), equalTo("value")); } - public void testIncludeInObjectNotAllowed() throws Exception { + public void testIncludeInDocumentNotAllowed() throws Exception { DocumentMapper docMapper = createDocumentMapper(mapping(b -> {})); Exception e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source(b -> b.field("_routing", "foo")))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java new file mode 100644 index 0000000000000..22c419f7f4769 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class TimeSeriesIdFieldMapperTests extends MetadataMapperTestCase { + + @Override + protected String fieldName() { + return TimeSeriesIdFieldMapper.NAME; + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException { + // There aren't any parameters + } + + public void testEnabledInTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createMapperService( + getIndexSettingsBuilder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + mapping(b -> {}) + ).documentMapper(); + + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "value") + .endObject()), + XContentType.JSON, null, new BytesArray("tsid"), Map.of())); + + assertThat(doc.rootDoc().getBinaryValue("_tsid"), equalTo(new BytesRef("tsid"))); + assertThat(doc.rootDoc().get("field"), equalTo("value")); + } + + public void testDisabledOutsideOfTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createMapperService( + getIndexSettingsBuilder().put(IndexSettings.TIME_SERIES_MODE.getKey(), false).build(), + mapping(b -> {}) + ).documentMapper(); + + ParsedDocument doc = docMapper.parse(new SourceToParse("test", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", "value") + .endObject()), + 
XContentType.JSON, null, new BytesArray("tsid"), Map.of())); + + assertThat(doc.rootDoc().getBinaryValue("_tsid"), is(nullValue())); + assertThat(doc.rootDoc().get("field"), equalTo("value")); + } + + public void testIncludeInDocumentNotAllowed() throws Exception { + DocumentMapper docMapper = createDocumentMapper(mapping(b -> {})); + Exception e = expectThrows(MapperParsingException.class, + () -> docMapper.parse(source(b -> b.field("_tsid", "foo")))); + + assertThat(e.getCause().getMessage(), + containsString("Field [_tsid] is a metadata field and cannot be added inside a document")); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 9a40777151485..32e8a9076c26d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -327,7 +327,12 @@ private static MappingLookup createMappingLookup(List concreteF RootObjectMapper.Builder builder = new RootObjectMapper.Builder("_doc"); Map runtimeFieldTypes = runtimeFields.stream().collect(Collectors.toMap(RuntimeField::name, r -> r)); builder.setRuntime(runtimeFieldTypes); - Mapping mapping = new Mapping(builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap()); + Mapping mapping = new Mapping( + builder.build(new ContentPath()), + new MetadataFieldMapper[0], + Collections.emptyMap(), + randomBoolean() + ); return MappingLookup.fromMappers(mapping, mappers, Collections.emptyList(), Collections.emptyList()); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 411902daa8d35..7c568f8390150 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -9,15 +9,23 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.Engine.IndexResult; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import java.io.IOException; @@ -33,7 +41,6 @@ public void testGetForUpdate() throws IOException { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .build(); IndexMetadata metadata = IndexMetadata.builder("test") .putMapping("{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") @@ -44,6 +51,7 @@ public void testGetForUpdate() throws IOException { LongSupplier translogInMemorySegmentCount = ((InternalEngine) primary.getEngine()).translogInMemorySegmentsCount::get; long translogInMemorySegmentCountExpected = 0; Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + throwFailure(test); assertTrue(primary.getEngine().refreshNeeded()); GetResult 
testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); @@ -53,7 +61,8 @@ public void testGetForUpdate() throws IOException { assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + throwFailure(test1); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); @@ -69,7 +78,8 @@ public void testGetForUpdate() throws IOException { } // now again from the reader - Engine.IndexResult test2 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + throwFailure(test2); assertTrue(primary.getEngine().refreshNeeded()); testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); @@ -120,6 +130,7 @@ private void runGetFromTranslogWithOptions(String docToIndex, String sourceOptio LongSupplier translogInMemorySegmentCount = ((InternalEngine) primary.getEngine()).translogInMemorySegmentsCount::get; long translogInMemorySegmentCountExpected = 0; Engine.IndexResult test = indexDoc(primary, "test", "0", docToIndex); + throwFailure(test); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); @@ -128,7 +139,8 @@ private void 
runGetFromTranslogWithOptions(String docToIndex, String sourceOptio assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed } - Engine.IndexResult test1 = indexDoc(primary, "1", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test1 = indexDoc(primary, "1", docToIndex, XContentType.JSON, "foobar", null); + throwFailure(test1); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source() == null ? new byte[0] : testGet1.source(), StandardCharsets.UTF_8), expectedResult); @@ -143,7 +155,8 @@ private void runGetFromTranslogWithOptions(String docToIndex, String sourceOptio assertEquals(searcher.getIndexReader().maxDoc(), 2); } - Engine.IndexResult test2 = indexDoc(primary, "2", docToIndex, XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, "2", docToIndex, XContentType.JSON, "foobar", null); + throwFailure(test2); assertTrue(primary.getEngine().refreshNeeded()); GetResult testGet2 = primary.getService().get("2", new String[]{"foo"}, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); @@ -160,12 +173,12 @@ private void runGetFromTranslogWithOptions(String docToIndex, String sourceOptio assertEquals(searcher.getIndexReader().maxDoc(), 3); } - testGet2 = primary.getService().get("2", new String[]{"foo"}, true, 1, VersionType.INTERNAL, + GetResult testGet3 = primary.getService().get("2", new String[]{"foo"}, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE); - assertEquals(new String(testGet2.source() == null ? new byte[0] : testGet2.source(), StandardCharsets.UTF_8), expectedResult); - assertTrue(testGet2.getFields().containsKey(RoutingFieldMapper.NAME)); - assertTrue(testGet2.getFields().containsKey("foo")); - assertEquals(expectedFooVal, testGet2.getFields().get("foo").getValue()); + assertEquals(new String(testGet3.source() == null ? 
new byte[0] : testGet3.source(), StandardCharsets.UTF_8), expectedResult); + assertTrue(testGet3.getFields().containsKey(RoutingFieldMapper.NAME)); + assertTrue(testGet3.getFields().containsKey("foo")); + assertEquals(expectedFooVal, testGet3.getFields().get("foo").getValue()); assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); closeShards(primary); @@ -190,4 +203,100 @@ public void testTypelessGetForUpdate() throws IOException { closeShards(shard); } + + public void testGetForUpdateTimeSeriesMode() throws IOException { + IndexShard primary = setupTimeSeriesShard(); + BytesReference timeSeriesId = new BytesArray("tsid"); + + LongSupplier translogInMemorySegmentCount = ((InternalEngine) primary.getEngine()).translogInMemorySegmentsCount::get; + long translogInMemorySegmentCountExpected = 0; + Engine.IndexResult test = indexDoc(primary, "0", "{\"foo\" : \"bar\"}", XContentType.JSON, null, timeSeriesId); + throwFailure(test); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("0", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet.getFields().containsKey(TimeSeriesIdFieldMapper.NAME)); // We don't fetch the tsid + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, timeSeriesId); + throwFailure(test1); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("1", UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); + 
assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(TimeSeriesIdFieldMapper.NAME)); // We don't fetch the tsid + assertEquals(translogInMemorySegmentCountExpected, translogInMemorySegmentCount.getAsLong()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.getIndexReader().maxDoc(), 1); // we read from the translog + } + closeShards(primary); + } + + public void testGetTimeSeriesMode() throws IOException { + IndexShard primary = setupTimeSeriesShard(); + BytesReference timeSeriesId = new BytesArray("tsid"); + + LongSupplier translogInMemorySegmentCount = ((InternalEngine) primary.getEngine()).translogInMemorySegmentsCount::get; + Engine.IndexResult test = indexDoc(primary, "0", "{\"foo\" : \"bar\"}", XContentType.JSON, null, timeSeriesId); + throwFailure(test); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().get("0", new String[]{"foo"}, true, 1, VersionType.INTERNAL, + FetchSourceContext.FETCH_SOURCE); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet.getFields().containsKey(TimeSeriesIdFieldMapper.NAME)); // We don't fetch the tsid + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + assertEquals(0, translogInMemorySegmentCount.getAsLong()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.getIndexReader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, timeSeriesId); + throwFailure(test1); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().get("1", new String[]{"foo"}, true, 
1, VersionType.INTERNAL, + FetchSourceContext.FETCH_SOURCE); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(TimeSeriesIdFieldMapper.NAME)); // We don't fetch the tsid + assertEquals(1, translogInMemorySegmentCount.getAsLong()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.getIndexReader().maxDoc(), 1); // we read from the translog + } + closeShards(primary); + } + + private IndexShard setupTimeSeriesShard() throws IOException { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.TIME_SERIES_MODE.getKey(), true) + .build(); + XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); + { + mapping.startObject("properties"); + { + mapping.startObject("foo").field("type", "keyword").field("dimension", true).endObject(); + } + mapping.endObject(); + } + mapping.endObject(); + IndexMetadata metadata = IndexMetadata.builder("test") + .putMapping(Strings.toString(mapping)) + .settings(settings) + .primaryTerm(0, 1) + .build(); + IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null); + recoverShardFromStore(primary); + return primary; + } + + private void throwFailure(IndexResult result) { + if (result.getFailure() != null) { + throw new AssertionError(result.getFailure()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index de7073d3cc15b..3e91b2a853861 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ 
b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.InternalEngine; @@ -334,7 +333,7 @@ public void testThrottling() throws Exception { public void testTranslogRecoveryWorksWithIMC() throws IOException { IndexShard shard = newStartedShard(true); for (int i = 0; i < 100; i++) { - indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}", XContentType.JSON, null); + indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}"); } shard.close("simon says", false); AtomicReference shardRef = new AtomicReference<>(); diff --git a/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java index 934ba51771b71..3e8bf2e9911b1 100644 --- a/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java @@ -29,6 +29,7 @@ import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; public class TimeSeriesIdGeneratorServiceTests extends ESTestCase { @@ -43,7 +44,7 @@ public void testNonTimeSeries() { im -> { throw new AssertionError("shouldn't be called"); } ) ) { - Metadata meta = Metadata.builder().put(index("index", false)).build(); + Metadata meta = Metadata.builder().put(index("index", false, "{}")).build(); genService.applyClusterState(meta); assertNull(genService.apply(meta.index("index"))); genService.stop(); @@ -54,7 +55,7 @@ public void 
testNonTimeSeries() { * Assert that a local time series index loads the time series from the local lookup. */ public void testLocalIndex() { - Metadata meta = Metadata.builder().put(index("index", true)).build(); + Metadata meta = Metadata.builder().put(index("index", true, "{}")).build(); IndexMetadata indexMetadata = meta.index("index"); TimeSeriesIdGenerator gen = mockGenerator(); try (TimeSeriesIdGeneratorService genService = genService(i -> new LocalIndex() { @@ -74,6 +75,29 @@ public TimeSeriesIdGenerator generator() { } } + /** + * Assert that two local indices with different mappings both load their data from the local lookup. + */ + public void testTwoLocalIndices() { + Metadata meta = Metadata.builder().put(index("index_1", true, "{}")).put(index("index_2", true, "{\"foo\": \"bar\"}")).build(); + try (TimeSeriesIdGeneratorService genService = genService(i -> new LocalIndex() { + @Override + public long metadataVersion() { + return meta.index(i).getVersion(); + } + + @Override + public TimeSeriesIdGenerator generator() { + return mockGenerator(); + } + }, im -> { throw new AssertionError("shouldn't be called"); })) { + genService.applyClusterState(meta); + assertThat(genService.apply(meta.index("index_1")), not(sameInstance(genService.apply(meta.index("index_2"))))); + genService.stop(); + } + } + + /** * Assert that a local time series index will reuse the previous building if * the mapping hasn't changed. 
@@ -82,7 +106,7 @@ public void testLocalIndexUnchangedMapping() { TimeSeriesIdGenerator gen = mockGenerator(); AtomicLong counter = new AtomicLong(); - Metadata meta = Metadata.builder().put(index("index", true)).build(); + Metadata meta = Metadata.builder().put(index("index", true, "{}")).build(); AtomicReference indexMetadata = new AtomicReference<>(meta.index("index")); try (TimeSeriesIdGeneratorService genService = genService(i -> { counter.incrementAndGet(); @@ -120,12 +144,23 @@ public TimeSeriesIdGenerator generator() { * Assert that a non local time series index will build its {@link TimeSeriesIdGenerator}. */ public void testNonLocalIndex() throws Exception { - Metadata meta = Metadata.builder().put(index("index", true)).build(); - IndexMetadata indexMetadata = meta.index("index"); + Metadata meta = Metadata.builder().put(index("index", true, "{}")).build(); TimeSeriesIdGenerator gen = mockGenerator(); try (TimeSeriesIdGeneratorService genService = genService(i -> null, im -> gen)) { genService.applyClusterState(meta); - assertBusy(() -> assertThat(genService.apply(indexMetadata), sameInstance(gen))); + assertThat(genService.apply(meta.index("index")), sameInstance(gen)); + genService.stop(); + } + } + + /** + * Assert two indices with different mappings build their own {@link TimeSeriesIdGenerator}. 
+ */ + public void testTwoNonLocalIndices() throws Exception { + Metadata meta = Metadata.builder().put(index("index_1", true, "{}")).put(index("index_2", true, "{\"foo\": \"bar\"}")).build(); + try (TimeSeriesIdGeneratorService genService = genService(i -> null, im -> mockGenerator())) { + genService.applyClusterState(meta); + assertThat(genService.apply(meta.index("index_1")), not(sameInstance(genService.apply(meta.index("index_2"))))); genService.stop(); } } @@ -138,7 +173,7 @@ public void testNonLocalIndexUnchangedMapping() throws Exception { TimeSeriesIdGenerator gen = mockGenerator(); AtomicLong counter = new AtomicLong(); - Metadata meta = Metadata.builder().put(index("index", true)).build(); + Metadata meta = Metadata.builder().put(index("index", true, "{}")).build(); AtomicReference indexMetadata = new AtomicReference<>(meta.index("index")); try (TimeSeriesIdGeneratorService genService = genService(i -> null, im -> { counter.incrementAndGet(); @@ -146,7 +181,7 @@ public void testNonLocalIndexUnchangedMapping() throws Exception { })) { for (int i = 0; i < 1000; i++) { genService.applyClusterState(meta); - assertBusy(() -> assertThat(genService.apply(indexMetadata.get()), sameInstance(gen))); + assertThat(genService.apply(indexMetadata.get()), sameInstance(gen)); assertThat(counter.get(), equalTo(1L)); } @@ -156,7 +191,7 @@ public void testNonLocalIndexUnchangedMapping() throws Exception { .build(); indexMetadata.set(meta.index("index")); genService.applyClusterState(meta); - assertBusy(() -> assertThat(genService.apply(indexMetadata.get()), sameInstance(gen))); + assertThat(genService.apply(indexMetadata.get()), sameInstance(gen)); assertThat(counter.get(), equalTo(2L)); genService.stop(); } @@ -169,7 +204,7 @@ public void testNonLocalIndexUnchangedMapping() throws Exception { public void testNonLocalIndexSameMappingAsLocalIndex() throws Exception { TimeSeriesIdGenerator gen = mockGenerator(); - Metadata meta = Metadata.builder().put(index("index_1", 
true)).put(index("index_2", true)).build(); + Metadata meta = Metadata.builder().put(index("index_1", true, "{}")).put(index("index_2", true, "{}")).build(); try (TimeSeriesIdGeneratorService genService = genService(i -> { if (i.getName().equals("index_1")) { return new LocalIndex() { @@ -192,6 +227,47 @@ public TimeSeriesIdGenerator generator() { } } + /** + * An index in time series mode with a null mapping should return an + * "empty" tsid generator. These indices are allowed, but you can't + * put any document into them until they have a mapping. + */ + public void testNullMapping() { + try ( + TimeSeriesIdGeneratorService genService = genService( + i -> { throw new AssertionError("shouldn't be called"); }, + im -> { throw new AssertionError("shouldn't be called"); } + ) + ) { + Metadata meta = Metadata.builder().put(index("index", true, null)).build(); + genService.applyClusterState(meta); + assertThat(genService.apply(meta.index("index")), sameInstance(TimeSeriesIdGenerator.EMPTY)); + genService.stop(); + } + } + + /** + * Attempting to fetch a generator for an index with a newer mapping + * fails. In production the service will always have a newer version + * of the mapping than the rest of ES. 
+ */ + public void testOutOfOrderMeta() { + try ( + TimeSeriesIdGeneratorService genService = genService( + i -> { throw new AssertionError("shouldn't be called"); }, + im -> { throw new AssertionError("shouldn't be called"); } + ) + ) { + Metadata meta = Metadata.builder().put(index("index", true, null)).build(); + genService.applyClusterState(meta); + IndexMetadata prev = meta.index("index"); + IndexMetadata next = IndexMetadata.builder(prev).mappingVersion(prev.getMappingVersion() + 1).build(); + Exception e = expectThrows(IllegalStateException.class, () -> genService.apply(next)); + assertThat(e.getMessage(), equalTo("Got a newer version of the index than the time series id generator [2] vs [1]")); + genService.stop(); + } + } + private TimeSeriesIdGeneratorService genService( Function lookupLocalIndex, Function buildTimeSeriedIdGenerator @@ -202,19 +278,22 @@ private TimeSeriesIdGeneratorService genService( return genService; } - private IndexMetadata.Builder index(String index, boolean timeSeriesMode) { + private IndexMetadata.Builder index(String index, boolean timeSeriesMode, String mapping) { Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT); if (timeSeriesMode) { settings.put(IndexSettings.TIME_SERIES_MODE.getKey(), true); } - return IndexMetadata.builder(index) + IndexMetadata.Builder builder = IndexMetadata.builder(index) .settings(settings) .numberOfShards(between(1, 10)) - .numberOfReplicas(randomInt(20)) - .putMapping("{}"); + .numberOfReplicas(randomInt(20)); + if (mapping != null) { + builder = builder.putMapping(mapping); + } + return builder; } private TimeSeriesIdGenerator mockGenerator() { - return new TimeSeriesIdGenerator(new ObjectComponent(Map.of("a", KeywordFieldMapper.timeSeriesIdGenerator(null)))); + return TimeSeriesIdGenerator.build(new ObjectComponent(Map.of("a", KeywordFieldMapper.timeSeriesIdGenerator(null)))); } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java index af41b2728df5e..46408c2f99d6d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/TranslogHandler.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; @@ -30,6 +31,7 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -90,9 +92,16 @@ public Engine.Operation convertToEngineOp(Translog.Operation operation, Engine.O case INDEX: final Translog.Index index = (Translog.Index) operation; final String indexName = mapperService.index().getName(); + final Function sourceSource = SourceToParse.parseTimeSeriesIdFromSource( + indexName, + index.id(), + index.source(), + XContentHelper.xContentType(index.source()), + index.routing(), + Map.of() + ); final Engine.Index engineIndex = IndexShard.prepareIndex(mapperService, - new SourceToParse(indexName, index.id(), index.source(), XContentHelper.xContentType(index.source()), - index.routing(), Map.of()), index.seqNo(), index.primaryTerm(), + sourceSource, index.seqNo(), index.primaryTerm(), index.version(), versionType, origin, index.getAutoGeneratedIdTimestamp(), true, SequenceNumbers.UNASSIGNED_SEQ_NO, SequenceNumbers.UNASSIGNED_PRIMARY_TERM); return engineIndex; diff --git 
a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index f341ae905ce0b..a38e8de3989b5 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -18,8 +18,6 @@ import org.apache.lucene.store.Directory; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.core.CheckedConsumer; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; @@ -34,6 +32,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -99,6 +99,10 @@ protected Settings getIndexSettings() { return SETTINGS; } + protected final Settings.Builder getIndexSettingsBuilder() { + return Settings.builder().put(getIndexSettings()); + } + protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { return createIndexAnalyzers(); } @@ -232,18 +236,20 @@ protected final SourceToParse source(CheckedConsumer build, @Nullable String routing) throws IOException { - XContentBuilder builder = JsonXContent.contentBuilder().startObject(); - build.accept(builder); - builder.endObject(); - return new SourceToParse("test", id, BytesReference.bytes(builder), XContentType.JSON, routing, Map.of()); + return source(id, build, routing, null, Map.of()); } - protected final SourceToParse source(String 
id, CheckedConsumer build, - @Nullable String routing, Map dynamicTemplates) throws IOException { + protected final SourceToParse source( + String id, + CheckedConsumer build, + @Nullable String routing, + @Nullable BytesReference timeSeriesId, + Map dynamicTemplates + ) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().startObject(); build.accept(builder); builder.endObject(); - return new SourceToParse("test", id, BytesReference.bytes(builder), XContentType.JSON, routing, dynamicTemplates); + return new SourceToParse("test", id, BytesReference.bytes(builder), XContentType.JSON, routing, timeSeriesId, dynamicTemplates); } protected final SourceToParse source(String source) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index fe7552862f35b..497e2cb7d17bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -28,6 +28,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -458,6 +459,17 @@ protected IndexShard newStartedShard() throws IOException { return newStartedShard(randomBoolean()); } + /** + * Creates a new empty shard in time series mode and starts it. 
+ */ + protected IndexShard newStartedTimeSeriesShard(boolean primary) throws IOException { + return newStartedShard( + primary, + Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + new InternalEngineFactory() + ); + } + /** * Creates a new empty shard and starts it * @param settings the settings to use for this shard @@ -728,14 +740,14 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return indexDoc(shard, id, source, XContentType.JSON, null); + return indexDoc(shard, id, source, XContentType.JSON, null, null); } protected Engine.IndexResult indexDoc(IndexShard shard, String id, String source, XContentType xContentType, - String routing) + @Nullable String routing, @Nullable BytesReference timeSeriesId) throws IOException { SourceToParse sourceToParse = new SourceToParse( - shard.shardId().getIndexName(), id, new BytesArray(source), xContentType, routing, Map.of()); + shard.shardId().getIndexName(), id, new BytesArray(source), xContentType, routing, timeSeriesId, Map.of()); Engine.IndexResult result; if (shard.routingEntry().primary()) { result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, diff --git a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index b64f21ce27a53..096aec14f96c0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -229,15 +229,19 @@ private XContentBuilder generateSource(long id, Random random) throws IOExceptio int tokenLength = RandomNumbers.randomIntBetween(random, 1, Math.min(contentLength - text.length(), 10)); text.append(" 
").append(RandomStrings.randomRealisticUnicodeOfCodepointLength(random, tokenLength)); } - XContentBuilder builder = XContentFactory.smileBuilder(); - builder.startObject().field("test", "value" + id) - .field("text", text.toString()) - .field("id", id) - .endObject(); - return builder; - + XContentBuilder builder = XContentFactory.smileBuilder().startObject(); + builder.field("test", "value" + id); + builder.field("text", text.toString()); + builder.field("id", id); + extraSource(builder); + return builder.endObject(); } + /** + * Hook for subclasses to add extra entries to the source. + */ + protected void extraSource(XContentBuilder builder) throws IOException {} + private volatile TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; public void setRequestTimeout(TimeValue timeout) { diff --git a/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml b/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml new file mode 100644 index 0000000000000..c976d0f6f7097 --- /dev/null +++ b/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml @@ -0,0 +1 @@ +# NOCOMMIT test tsdb with ccr diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java index cf224e3582ce1..a608d23aad9c7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotShardTests.java @@ -35,12 +35,14 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; @@ -65,8 +67,8 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.ShardGenerations; -import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.ShardSnapshotResult; +import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; import org.elasticsearch.repositories.fs.FsRepository; @@ -124,10 +126,12 @@ public void testSourceIncomplete() throws IOException { } public void testIncrementalSnapshot() throws IOException { - IndexShard shard = newStartedShard(); + boolean timeSeriesMode = randomBoolean(); + BytesReference timeSeriesId = timeSeriesMode ? new BytesArray("tsid") : null; + IndexShard shard = timeSeriesMode ? 
newStartedTimeSeriesShard(randomBoolean()) : newStartedShard(); for (int i = 0; i < 10; i++) { final String id = Integer.toString(i); - indexDoc(shard, "_doc", id); + indexDoc(shard, id, "{}", XContentType.JSON, null, timeSeriesId); } IndexId indexId = new IndexId(shard.shardId().getIndexName(), shard.shardId().getIndex().getUUID()); @@ -149,8 +153,8 @@ public void testIncrementalSnapshot() throws IOException { assertEquals(copy.getStage(), IndexShardSnapshotStatus.Stage.DONE); } - indexDoc(shard, "_doc", Integer.toString(10)); - indexDoc(shard, "_doc", Integer.toString(11)); + indexDoc(shard, Integer.toString(10), "{}", XContentType.JSON, null, timeSeriesId); + indexDoc(shard, Integer.toString(11), "{}", XContentType.JSON, null, timeSeriesId); try (Engine.IndexCommitRef snapshotRef = shard.acquireLastIndexCommit(true)) { SnapshotId snapshotId = new SnapshotId("test_1", "test_1"); @@ -191,12 +195,14 @@ private String randomDoc() { return "{ \"value\" : \"" + randomAlphaOfLength(10) + "\"}"; } - public void testRestoreMinmal() throws IOException { - IndexShard shard = newStartedShard(true); + public void testRestoreMinimal() throws IOException { + boolean timeSeriesMode = randomBoolean(); + BytesReference timeSeriesId = timeSeriesMode ? new BytesArray("tsid") : null; + IndexShard shard = timeSeriesMode ? 
newStartedTimeSeriesShard(true) : newStartedShard(true); int numInitialDocs = randomIntBetween(10, 100); for (int i = 0; i < numInitialDocs; i++) { final String id = Integer.toString(i); - indexDoc(shard, id, randomDoc()); + indexDoc(shard, id, randomDoc(), XContentType.JSON, null, timeSeriesId); if (randomBoolean()) { shard.refresh("test"); } @@ -207,7 +213,7 @@ public void testRestoreMinmal() throws IOException { if (rarely()) { deleteDoc(shard, id); } else { - indexDoc(shard, id, randomDoc()); + indexDoc(shard, id, randomDoc(), XContentType.JSON, null, timeSeriesId); } } if (frequently()) { @@ -296,8 +302,11 @@ public void testRestoreMinmal() throws IOException { previous = current; } expectThrows(UnsupportedOperationException.class, () -> searcher.search(new TermQuery(new Term("boom", "boom")), 1)); - targetShard = reindex(searcher.getDirectoryReader(), new MappingMetadata("_doc", - restoredShard.mapperService().documentMapper().mapping().getMeta())); + targetShard = reindex( + searcher.getDirectoryReader(), + new MappingMetadata("_doc", restoredShard.mapperService().documentMapper().mapping().getMeta()), + timeSeriesId + ); } for (int i = 0; i < numInitialDocs; i++) { @@ -319,7 +328,7 @@ public void testRestoreMinmal() throws IOException { closeShards(shard, restoredShard, targetShard); } - public IndexShard reindex(DirectoryReader reader, MappingMetadata mapping) throws IOException { + public IndexShard reindex(DirectoryReader reader, MappingMetadata mapping, BytesReference timeSeriesId) throws IOException { ShardRouting targetShardRouting = TestShardRouting.newShardRouting(new ShardId("target", "_na_", 0), randomAlphaOfLength(10), true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) @@ -347,10 +356,24 @@ public IndexShard reindex(DirectoryReader reader, MappingMetadata mapping) throw String id = rootFieldsVisitor.id(); 
BytesReference source = rootFieldsVisitor.source(); assert source != null : "_source is null but should have been filtered out at snapshot time"; - Engine.Result result = targetShard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, - new SourceToParse(index, id, source, XContentHelper.xContentType(source), - rootFieldsVisitor.routing(), Map.of()), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false); + SourceToParse sourceToParse = new SourceToParse( + index, + id, + source, + XContentHelper.xContentType(source), + rootFieldsVisitor.routing(), + timeSeriesId, + Map.of() + ); + Engine.Result result = targetShard.applyIndexOperationOnPrimary( + Versions.MATCH_ANY, + VersionType.INTERNAL, + sourceToParse, + SequenceNumbers.UNASSIGNED_SEQ_NO, + 0, + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, + false + ); if (result.getResultType() != Engine.Result.Type.SUCCESS) { throw new IllegalStateException("failed applying post restore operation result: " + result .getResultType(), result.getFailure()); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml new file mode 100644 index 0000000000000..d393f3b02508f --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml @@ -0,0 +1 @@ +# NOCOMMIT smoke test tsdb \ No newline at end of file From 95b05bf552aca4951d5de6d9b549284abe013717 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 30 Jul 2021 10:08:17 -0400 Subject: [PATCH 07/29] Tests --- .../upgrades/FullClusterRestartIT.java | 109 +++++++++- qa/rolling-upgrade/build.gradle | 6 + .../upgrades/AbstractRollingTestCase.java | 8 +- .../elasticsearch/upgrades/IndexingIT.java | 135 +++++++++++- .../elasticsearch/upgrades/RecoveryIT.java | 1 - .../test/mixed_cluster/10_basic.yml | 2 + .../rest-api-spec/test/tsdb/10_search.yml | 16 +- 
.../test/tsdb/70_index_resize.yml | 16 +- .../xpack/ccr/FollowIndexIT.java | 122 +++++++++++ .../resources/rest-api-spec/test/ccr/tsdb.yml | 1 - .../test/security/authz/70_tsdb.yml | 193 +++++++++++++++++- 11 files changed, 572 insertions(+), 37 deletions(-) delete mode 100644 x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 12aba56fa2190..ec63518158a7f 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.XContentTestUtils; @@ -45,6 +46,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -151,7 +153,7 @@ public void testSearch() throws Exception { assertStoredBinaryFields(count); } - public void testNewReplicasWork() throws Exception { + public void testNewReplicas() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); mappingsAndSettings.startObject(); @@ -189,7 +191,7 @@ public void testNewReplicasWork() throws Exception { logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); setNumberOfReplicas.setJsonEntity("{ \"index\": { 
\"number_of_replicas\" : " + numReplicas + " }}"); - Response response = client().performRequest(setNumberOfReplicas); + client().performRequest(setNumberOfReplicas); ensureGreenLongWait(index); @@ -210,6 +212,91 @@ public void testNewReplicasWork() throws Exception { } } + public void testSearchTimeSeriesMode() throws Exception { + assumeTrue("time series mode introduced in 8.0.0 to be backported to 7.15.0", getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + int numDocs; + if (isRunningAgainstOldCluster()) { + numDocs = createTimeSeriesModeIndex(1); + } else { + numDocs = countOfIndexedRandomDocuments(); + } + assertCountAll(numDocs); + } + + public void testNewReplicasTimeSeriesMode() throws Exception { + assumeTrue("time series mode introduced in 8.0.0 to be backported to 7.15.0", getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + if (isRunningAgainstOldCluster()) { + createTimeSeriesModeIndex(0); + } else { + final int numReplicas = 1; + final long startTime = System.currentTimeMillis(); + logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); + Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); + setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + client().performRequest(setNumberOfReplicas); + + ensureGreenLongWait(index); + + logger.debug("--> index [{}] is green, took [{}] ms", index, (System.currentTimeMillis() - startTime)); + Map recoverRsp = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_recovery"))); + logger.debug("--> recovery status:\n{}", recoverRsp); + + Set counts = new HashSet<>(); + for (String node : dataNodes(index, client())) { + Request search = new Request("GET", "/" + index + "/_search"); + search.addParameter("preference", "_only_nodes:" + node); + Map responseBody = entityAsMap(client().performRequest(search)); + assertNoFailures(responseBody); + int hits = extractTotalHits(responseBody); + 
counts.add(hits); + } + assertEquals("All nodes should have a consistent number of documents", 1, counts.size()); + } + } + + private int createTimeSeriesModeIndex(int replicas) throws IOException { + XContentBuilder mappingsAndSettings = jsonBuilder(); + mappingsAndSettings.startObject(); + { + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("number_of_shards", 1); + mappingsAndSettings.field("number_of_replicas", replicas); + mappingsAndSettings.field("time_series_mode", true); + mappingsAndSettings.endObject(); + } + { + mappingsAndSettings.startObject("mappings"); + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("@timestamp").field("type", "date").endObject(); + mappingsAndSettings.startObject("dim").field("type", "keyword").field("dimension", true).endObject(); + } + mappingsAndSettings.endObject(); + mappingsAndSettings.endObject(); + } + mappingsAndSettings.endObject(); + + Request createIndex = new Request("PUT", "/" + index); + createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + client().performRequest(createIndex); + + int numDocs = randomIntBetween(2000, 3000); + long basetime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2021-01-01T00:00:00Z"); + indexRandomDocuments( + numDocs, + true, + true, + i -> JsonXContent.contentBuilder() + .startObject() + .field("@timestamp", basetime + TimeUnit.MINUTES.toMillis(i)) + .field("dim", "value") + .endObject() + ); + logger.info("Refreshing [{}]", index); + client().performRequest(new Request("POST", "/" + index + "/_refresh")); + return numDocs; + } + public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -459,14 +546,18 @@ public void testRollover() throws IOException { assertEquals(expectedCount, extractTotalHits(count)); } + void assertCountAll(int count) throws IOException { + Map response = entityAsMap(client().performRequest(new 
Request("GET", "/" + index + "/_search"))); + assertNoFailures(response); + int numDocs = extractTotalHits(response); + logger.info("Found {} in old index", numDocs); + assertEquals(count, numDocs); + } + void assertBasicSearchWorks(int count) throws IOException { logger.info("--> testing basic search"); { - Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); - assertNoFailures(response); - int numDocs = extractTotalHits(response); - logger.info("Found {} in old index", numDocs); - assertEquals(count, numDocs); + assertCountAll(count); } logger.info("--> testing basic search with sort"); @@ -1136,8 +1227,6 @@ private void checkSnapshot(final String snapshotName, final int count, final Ver } } - // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. - private void indexRandomDocuments( final int count, final boolean flushAllowed, @@ -1629,6 +1718,4 @@ public static void assertNumHits(String index, int numHits, int totalShards) thr assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); assertThat(extractTotalHits(resp), equalTo(numHits)); } - - // NOCOMMIT tsdb smoke test } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 5ef69066ac780..b4963d7dad7b8 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -42,6 +42,8 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } } + String oldVersion = bwcVersion.toString().replace('-SNAPSHOT', '') + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn "processTestResources" useCluster testClusters."${baseName}" @@ -50,6 +52,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { delete("${buildDir}/cluster/shared/repo/${baseName}") } systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion 
nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } @@ -61,6 +64,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { testClusters."${baseName}".nextNodeToNextVersion() } systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'true' nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") @@ -73,6 +77,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { testClusters."${baseName}".nextNodeToNextVersion() } systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion systemProperty 'tests.first_round', 'false' nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") @@ -85,6 +90,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } useCluster testClusters."${baseName}" systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}") } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 9e9081f54028b..26de71b9ba629 100644 --- 
a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.upgrades; +import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; @@ -24,14 +25,15 @@ public static ClusterType parse(String value) { return MIXED; case "upgraded_cluster": return UPGRADED; - default: - throw new AssertionError("unknown cluster type: " + value); + default: + throw new AssertionError("unknown cluster type: " + value); } } } protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); - protected static final boolean firstMixedRound = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); + protected static final boolean FIRST_MIXED_ROUND = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); + protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); @Override protected final boolean preserveIndicesUponCompletion() { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index c99e2412813b4..ef2f70c6bad8e 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -7,23 +7,33 @@ */ package org.elasticsearch.upgrades; +import io.github.nik9000.mapmatcher.ListMatcher; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.core.Booleans; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.hamcrest.Matcher; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; +import static io.github.nik9000.mapmatcher.ListMatcher.matchesList; +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; @@ -228,12 +238,131 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio b.append("{\"index\": {\"_index\": \"").append(index).append("\"}}\n"); b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); } + bulk(index, b.toString()); + } + + private static final List TSDB_DIMS = List.of("6a841a21", "947e4ced", "a4c385a1", "b47a2f4e", "df3145b3"); + private static final long[] TSDB_TIMES; + static { + String[] times = new String[] { + "2021-01-01T00:00:00Z", + "2021-01-02T00:00:00Z", + "2021-01-02T00:10:00Z", + "2021-01-02T00:20:00Z", + "2021-01-02T00:30:00Z" }; + TSDB_TIMES = new long[times.length]; + for (int i = 0; i < times.length; i++) { + TSDB_TIMES[i] = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(times[i]); + } + } + + public void testTsdb() throws IOException { + assumeTrue("tsdb added in 8.0.0 to be backported to 7.15.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_0_0)); + + StringBuilder 
bulk = new StringBuilder(); + switch (CLUSTER_TYPE) { + case OLD: + createTsdbIndex(); + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[0], TSDB_TIMES[1], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[0], TSDB_TIMES[1], -0.1); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(215.95, 0.005), closeTo(-215.95, 0.005)); + return; + case MIXED: + if (FIRST_MIXED_ROUND) { + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[1], TSDB_TIMES[2], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[1], TSDB_TIMES[2], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[0], TSDB_TIMES[2], 1.1); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(217.45, 0.005), closeTo(-217.45, 0.005), closeTo(2391.95, 0.005)); + return; + } + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[2], TSDB_TIMES[3], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[2], TSDB_TIMES[3], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[2], TSDB_TIMES[3], 1.1); + tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[0], TSDB_TIMES[3], 10); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(218.95, 0.005), closeTo(-218.95, 0.005), closeTo(2408.45, 0.005), closeTo(21895, 0.5)); + return; + case UPGRADED: + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[3], TSDB_TIMES[4], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[3], TSDB_TIMES[4], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[3], TSDB_TIMES[4], 1.1); + tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[3], TSDB_TIMES[4], 10); + tsdbBulk(bulk, TSDB_DIMS.get(4), TSDB_TIMES[0], TSDB_TIMES[4], -5); + bulk("tsdb", bulk.toString()); + assertTsdbAgg( + closeTo(220.45, 0.005), + closeTo(-220.45, 0.005), + closeTo(2424.95, 0.005), + closeTo(22045, 0.5), + closeTo(-11022.5, 0.5) + ); + return; + } + } + + private void bulk(String index, String entity) throws IOException { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - bulk.setJsonEntity(b.toString()); + bulk.setJsonEntity(entity.toString()); 
client().performRequest(bulk); } + private void createTsdbIndex() throws IOException { + Request createIndex = new Request("PUT", "/tsdb"); + XContentBuilder indexSpec = XContentBuilder.builder(XContentType.JSON.xContent()).startObject(); + indexSpec.startObject("mappings").startObject("properties"); + { + indexSpec.startObject("@timestamp").field("type", "date").endObject(); + indexSpec.startObject("dim").field("type", "keyword").field("dimension", true).endObject(); + } + indexSpec.endObject().endObject(); + indexSpec.startObject("settings").field("time_series_mode", true).endObject(); + createIndex.setJsonEntity(Strings.toString(indexSpec.endObject())); + client().performRequest(createIndex); + } + + private void tsdbBulk(StringBuilder bulk, String dim, long timeStart, long timeEnd, double rate) throws IOException { + long delta = TimeUnit.SECONDS.toMillis(20); + double value = (timeStart - TSDB_TIMES[0]) / TimeUnit.SECONDS.toMillis(20) * rate; + for (long t = timeStart; t < timeEnd; t += delta) { + bulk.append("{\"index\": {\"_index\": \"tsdb\"}}\n"); + bulk.append("{\"@timestamp\": ").append(t); + bulk.append(", \"dim\": \"").append(dim).append("\""); + bulk.append(", \"value\": ").append(value).append("}\n"); + value += rate; + } + } + + private void assertTsdbAgg(Matcher... 
expected) throws IOException { + Request request = new Request("POST", "/tsdb/_search"); + request.addParameter("size", "0"); + XContentBuilder body = JsonXContent.contentBuilder().startObject(); + body.startObject("aggs").startObject("tsids"); + { + body.startObject("terms").field("field", "_tsid").endObject(); + body.startObject("aggs").startObject("avg"); + { + body.startObject("avg").field("field", "value").endObject(); + } + body.endObject().endObject(); + } + body.endObject().endObject(); + request.setJsonEntity(Strings.toString(body.endObject())); + ListMatcher tsidsExpected = matchesList(); + for (int d = 0; d < expected.length; d++) { + tsidsExpected = tsidsExpected.item( + matchesMap().extraOk().entry("key", Map.of("dim", TSDB_DIMS.get(d))).entry("avg", Map.of("value", expected[d])) + ); + } + assertMap( + entityAsMap(client().performRequest(request)), + matchesMap().extraOk() + .entry("aggregations", matchesMap().entry("tsids", matchesMap().extraOk().entry("buckets", tsidsExpected))) + ); + } + private void assertCount(String index, int count) throws IOException { Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); @@ -258,6 +387,4 @@ private Version minNodeVersion() throws IOException { } return minNodeVersion; } - - // NOCOMMIT tsdb smoke test } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 41996f2b9e434..ae26fc78373a1 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -380,7 +380,6 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); break; - case UPGRADED: ensureGreen(index); 
ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 806de5d4d987e..216c1d4b06e4c 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -74,6 +74,7 @@ nodes.usage: {} - is_true: nodes - match: { _nodes.failed: 0 } + --- "Get index works": - do: @@ -106,3 +107,4 @@ - match: {index_templates.0.index_template.template.aliases.test_blias.index_routing: "b" } - match: {index_templates.0.index_template.template.aliases.test_blias.search_routing: "b" } - match: {index_templates.0.index_template.template.aliases.test_clias.filter.term.user: "kimchy" } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml index 0ef25302eacc6..5b375b9280782 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml @@ -36,21 +36,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - 
'{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 
530600088}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' --- "query a dimension": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml index 586783af0d352..30e73a730c11b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml @@ -38,21 +38,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", 
"uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": 
{"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T17:53:34.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - do: indices.put_settings: diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index e0ff2a02fc6bf..88869c8c2f94f 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -15,6 +15,8 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; @@ -23,6 +25,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static 
io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -235,6 +239,124 @@ public void testFollowSearchableSnapshotsFails() throws Exception { } } + public void testFollowTsdbIndex() throws Exception { + final int numDocs = 128; + final String leaderIndexName = "tsdb_leader"; + long basetime = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2021-01-01T00:00:00Z"); + if ("leader".equals(targetCluster)) { + logger.info("Running against leader cluster"); + createIndex( + leaderIndexName, + Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + "\"properties\": {\"@timestamp\": {\"type\": \"date\"}, \"dim\": {\"type\": \"keyword\", \"dimension\": true}}" + ); + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index( + client(), + leaderIndexName, + Integer.toString(i), + "@timestamp", + basetime + TimeUnit.SECONDS.toMillis(i * 10), + "dim", + "foobar" + ); + } + refresh(leaderIndexName); + verifyDocuments(client(), leaderIndexName, numDocs); + } else if ("follow".equals(targetCluster)) { + logger.info("Running against follow cluster"); + final String followIndexName = "tsdb_follower"; + final boolean overrideNumberOfReplicas = randomBoolean(); + if (overrideNumberOfReplicas) { + followIndex( + client(), + "leader_cluster", + leaderIndexName, + followIndexName, + Settings.builder().put("index.number_of_replicas", 0).build() + ); + } else { + followIndex(leaderIndexName, followIndexName); + } + assertBusy(() -> { + verifyDocuments(client(), followIndexName, numDocs); + if (overrideNumberOfReplicas) { + assertMap( + getIndexSettingsAsMap(followIndexName), + matchesMap().extraOk().entry("index.time_series_mode", "true").entry("index.number_of_replicas", "0") + ); + } else { + assertMap( + 
getIndexSettingsAsMap(followIndexName), + matchesMap().extraOk().entry("index.time_series_mode", "true").entry("index.number_of_replicas", "1") + ); + } + }); + // unfollow and then follow and then index a few docs in leader index: + pauseFollow(followIndexName); + resumeFollow(followIndexName); + try (RestClient leaderClient = buildLeaderClient()) { + int id = numDocs; + index( + leaderClient, + leaderIndexName, + Integer.toString(id), + "@timestamp", + basetime + TimeUnit.SECONDS.toMillis(id * 10), + "dim", + "foobar" + ); + index( + leaderClient, + leaderIndexName, + Integer.toString(id + 1), + "@timestamp", + basetime + TimeUnit.SECONDS.toMillis(id * 10 + 10), + "dim", + "foobar" + ); + index( + leaderClient, + leaderIndexName, + Integer.toString(id + 2), + "@timestamp", + basetime + TimeUnit.SECONDS.toMillis(id * 10 + 20), + "dim", + "foobar" + ); + } + assertBusy(() -> verifyDocuments(client(), followIndexName, numDocs + 3)); + assertBusy(() -> verifyCcrMonitoring(leaderIndexName, followIndexName), 30, TimeUnit.SECONDS); + + pauseFollow(followIndexName); + closeIndex(followIndexName); + assertOK(client().performRequest(new Request("POST", "/" + followIndexName + "/_ccr/unfollow"))); + Exception e = expectThrows(ResponseException.class, () -> resumeFollow(followIndexName)); + assertThat(e.getMessage(), containsString("follow index [" + followIndexName + "] does not have ccr metadata")); + } + } + + public void testFollowTsdbIndexCanNotOverrideMode() throws Exception { + if (false == "follow".equals(targetCluster)) { + return; + } + logger.info("Running against follow cluster"); + Exception e = expectThrows(ResponseException.class, () -> followIndex( + client(), + "leader_cluster", + "tsdb_leader", + "tsdb_follower_bad", + Settings.builder().put("index.time_series_mode", false).build() + )); + assertThat( + e.getMessage(), + containsString("can not put follower index that could override leader settings {\\\"index.time_series_mode\\\":\\\"false\\\"}") + ); + } + 
+ // TODO can't override tsdb mode setting + @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray())); diff --git a/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml b/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml deleted file mode 100644 index c976d0f6f7097..0000000000000 --- a/x-pack/plugin/ccr/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/ccr/tsdb.yml +++ /dev/null @@ -1 +0,0 @@ -# NOCOMMIT test tsdb with ccr diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml index d393f3b02508f..5db603c04c168 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml @@ -1 +1,192 @@ -# NOCOMMIT smoke test tsdb \ No newline at end of file +setup: + - do: + indices.create: + index: test + body: + settings: + index: + time_series_mode: true + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - 
'{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +document level security on tag: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: headers + + - do: + security.put_role: + name: "limitread" + body: > + { + "indices": [ + { + "names": ["*"], + "privileges": ["read"], + "query": {"match": {"k8s.pod.name": "cat"}} + } + ] + } + + - do: + security.put_user: + username: "limited" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "limitread" ], + "full_name" : "user who can read some data" + } + + - do: + headers: { Authorization: "Basic 
bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + search: + index: test + size: 0 + body: + aggs: + tsids: + terms: + field: _tsid + + - match: { hits.total.value: 4 } + - length: { aggregations.tsids.buckets: 1 } + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + +--- +document level security on dimension: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: headers + + - do: + security.put_role: + name: "limitread" + body: > + { + "indices": [ + { + "names": ["*"], + "privileges": ["read"], + "query": {"match": {"k8s.pod.uid": "947e4ced-1786-4e53-9e0c-5c447e959507"}} + } + ] + } + + - do: + security.put_user: + username: "limited" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "limitread" ], + "full_name" : "user who can read some data" + } + + - do: + headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + search: + index: test + size: 0 + body: + aggs: + tsids: + terms: + field: _tsid + + - match: { hits.total.value: 4 } + - length: { aggregations.tsids.buckets: 1 } + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + +--- +document level security on tsid is not possible: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: headers + + # It'd be better if this failed immediately but security doesn't build the + # query until you use it on an index. 
+ - do: + security.put_role: + name: "limitread" + body: > + { + "indices": [ + { + "names": ["*"], + "privileges": ["read"], + "query": {"match": {"_tsid": "doesn't work"}} + } + ] + } + + - do: + security.put_user: + username: "limited" + body: > + { + "password" : "x-pack-test-password", + "roles" : [ "limitread" ], + "full_name" : "user who's role is broken" + } + + - do: + headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user + catch: /Field \[_tsid\] of type \[_tsid\] does not support match queries/ + search: + index: test + size: 0 + body: + aggs: + tsids: + terms: + field: _tsid From cbca5063fd71b7e25a89fd0eaaa0d498cd02c6ba Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 2 Aug 2021 12:54:16 -0400 Subject: [PATCH 08/29] mode=time_series --- .../upgrades/FullClusterRestartIT.java | 2 +- .../elasticsearch/upgrades/IndexingIT.java | 2 +- .../20_tsdb_consistency.yml | 4 +- .../rest-api-spec/test/tsdb/10_search.yml | 2 +- .../rest-api-spec/test/tsdb/20_bad_config.yml | 12 +-- .../test/tsdb/30_unsupported_operations.yml | 2 +- .../test/tsdb/40_invalid_indexing.yml | 6 +- .../test/tsdb/50_add_missing_dimensions.yml | 2 +- .../test/tsdb/60_dimension_types.yml | 6 +- .../test/tsdb/70_index_resize.yml | 2 +- .../test/tsdb/80_term_vectors.yml | 2 +- .../indices/recovery/IndexRecoveryIT.java | 43 +++++----- .../IndexRecoveryInTimeSeriesModeIT.java | 5 +- .../action/bulk/TransportBulkAction.java | 17 ++-- .../action/bulk/TransportShardBulkAction.java | 4 +- .../action/index/IndexRequest.java | 6 +- .../action/update/TransportUpdateAction.java | 7 +- .../cluster/metadata/IndexMetadata.java | 17 ++-- .../common/settings/IndexScopedSettings.java | 2 +- .../common/settings/Setting.java | 23 ++++- .../org/elasticsearch/index/IndexMode.java | 86 +++++++++++++++++++ .../elasticsearch/index/IndexSettings.java | 49 +++-------- .../elasticsearch/index/IndexSortConfig.java | 10 +-- .../index/mapper/DocumentMapper.java | 8 +- 
.../index/mapper/MapperService.java | 4 +- .../elasticsearch/index/mapper/Mapping.java | 25 ++++-- .../index/mapper/MappingLookup.java | 2 +- .../index/mapper/MappingParser.java | 9 +- .../index/mapper/RoutingFieldMapper.java | 3 +- .../index/mapper/SourceToParse.java | 13 ++- .../index/mapper/TimeSeriesIdFieldMapper.java | 2 +- .../index/termvectors/TermVectorsService.java | 15 ++-- .../indices/TimeSeriesIdGeneratorService.java | 6 +- .../MetadataRolloverServiceTests.java | 12 ++- .../action/bulk/TransportBulkActionTests.java | 14 +-- .../bulk/TransportShardBulkActionTests.java | 22 ++--- .../index/MappingUpdatedActionTests.java | 3 +- .../index/IndexSortSettingsTests.java | 6 +- .../index/mapper/DocumentMapperTests.java | 4 +- .../FieldAliasMapperValidationTests.java | 3 +- .../index/mapper/MappingLookupTests.java | 3 +- .../index/mapper/MappingParserTests.java | 52 +++++++++-- .../mapper/TimeSeriesIdFieldMapperTests.java | 6 +- .../query/SearchExecutionContextTests.java | 3 +- .../index/shard/ShardGetServiceTests.java | 2 +- .../TimeSeriesIdGeneratorServiceTests.java | 2 +- .../index/shard/IndexShardTestCase.java | 2 +- .../xpack/ccr/FollowIndexIT.java | 10 +-- .../test/security/authz/70_tsdb.yml | 2 +- 49 files changed, 342 insertions(+), 202 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/IndexMode.java diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index ec63518158a7f..12a0c7a1ebcab 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -261,7 +261,7 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); 
mappingsAndSettings.field("number_of_replicas", replicas); - mappingsAndSettings.field("time_series_mode", true); + mappingsAndSettings.field("mode", "time_series"); mappingsAndSettings.endObject(); } { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index ef2f70c6bad8e..8edebec485326 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -318,7 +318,7 @@ private void createTsdbIndex() throws IOException { indexSpec.startObject("dim").field("type", "keyword").field("dimension", true).endObject(); } indexSpec.endObject().endObject(); - indexSpec.startObject("settings").field("time_series_mode", true).endObject(); + indexSpec.startObject("settings").field("mode", "time_series").endObject(); createIndex.setJsonEntity(Strings.toString(indexSpec.endObject())); client().performRequest(createIndex); } diff --git a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml index 3229c80a49fa1..1b02e0c47d79c 100644 --- a/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml +++ b/qa/smoke-test-multinode/src/test/resources/rest-api-spec/test/smoke_test_multinode/20_tsdb_consistency.yml @@ -1,4 +1,4 @@ -# Test the time_series_mode properly groups by _tsid. If we could put this in +# Test that mode:time_series properly groups by _tsid. If we could put this in # rest-api-spec we would, but it requires painless.
setup: @@ -8,7 +8,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series number_of_shards: 3 number_of_replicas: 1 mappings: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml index a2ada2d96b8e5..1d5bd6acadf25 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_search.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml index dc1d02c4d028b..b157cec072456 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml @@ -11,7 +11,7 @@ body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -34,7 +34,7 @@ body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -57,7 +57,7 @@ body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -90,7 +90,7 @@ sort: field: "@timestamp" order: desc - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -107,13 +107,13 @@ reason: introduced in 8.0.0 to be backported to 7.15.0 - do: - catch: /\[index.time_series_mode\] is incompatible with \[index.routing_partition_size\]/ + catch: /\[index.mode\] is incompatible with \[index.routing_partition_size\]/ indices.create: index: test body: settings: index: - time_series_mode: true + mode: time_series number_of_shards: 10 routing_partition_size: 2 mappings: diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml index b45539d77dd7f..b7be93b105b04 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_unsupported_operations.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml index bbecab2f2b9a8..50dba96504030 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -70,7 +70,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -99,7 +99,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml index a08ce7b22b2b7..8e28ad47be369 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -10,7 +10,7 @@ body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml index ab0e91da5fd1d..97454c553fff0 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml @@ -10,7 +10,7 @@ keyword dimension: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -78,7 +78,7 @@ long dimension: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": @@ -146,7 +146,7 @@ ip dimension: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml index 8d54a100971bd..dc9d44e0a760f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_index_resize.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series number_of_shards: 3 number_of_replicas: 0 mappings: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml index e822f73815d9e..84947fc722360 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_term_vectors.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 71e4c1c0e34f4..0c34d8405c6c7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -58,6 +58,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MockEngineFactoryPlugin; @@ -273,7 +274,7 @@ public void testReplicaRecovery() throws Exception { Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -353,7 +354,7 @@ public void testCancelNewShardRecoveryAndUsesExistingShardCopy() throws Exceptio .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms") - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -721,7 +722,7 @@ private IndicesStatsResponse createAndPopulateIndex(String name, int nodeCount, .put("number_of_shards", shardCount) .put("number_of_replicas", replicaCount) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ) .setMapping(minimalMapping()) 
); @@ -774,7 +775,7 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); @@ -956,7 +957,7 @@ public void testDisconnectsWhileRecovering() throws Exception { .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); @@ -1069,7 +1070,7 @@ public void testDisconnectsDuringRecovery() throws Exception { .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); @@ -1179,7 +1180,7 @@ public void testHistoryRetention() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) .put(IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.getKey(), 1.0) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); ensureGreen(indexName); @@ -1241,7 +1242,7 @@ public void testDoNotInfinitelyWaitForMapping() throws IOException { .putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter") .put("index.number_of_replicas", 0) .put("index.number_of_shards", 1) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + 
.put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); client().admin().indices().preparePutMapping("test").setSource("test_field", "type=text,analyzer=test_analyzer").get(); @@ -1283,7 +1284,7 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put("index.routing.allocation.include._name", nodeWithPrimary) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); MockTransportService transport = (MockTransportService) internalCluster().getInstance(TransportService.class, nodeWithPrimary); @@ -1333,7 +1334,7 @@ public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { // disable global checkpoint background sync so we can verify the start recovery request .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "12h") .put("index.routing.allocation.include._name", String.join(",", nodes)) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); ensureGreen(indexName); @@ -1420,7 +1421,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() throws Exception .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -1471,7 +1472,7 @@ public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() t .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") - 
.put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -1526,7 +1527,7 @@ public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonabl .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h") .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()); + .put(IndexSettings.MODE.getKey(), indexMode()); final double reasonableOperationsBasedRecoveryProportion; if (randomBoolean()) { @@ -1630,7 +1631,7 @@ public void testDoesNotCopyOperationsInSafeCommit() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -1696,7 +1697,7 @@ public void testRepeatedRecovery() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 6)) .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "200ms") - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ).setMapping(minimalMapping()) ); indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, randomIntBetween(0, 10)) @@ -1742,7 +1743,7 @@ public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), randomBoolean()) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ) .setMapping(minimalMapping()) 
); @@ -1775,7 +1776,7 @@ public void testPeerRecoveryTrimsLocalTranslog() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 1) .put("index.routing.allocation.include._name", String.join(",", dataNodes)) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) .build() ).setMapping(minimalMapping()) ); @@ -1833,7 +1834,7 @@ public void testCancelRecoveryWithAutoExpandReplicas() throws Exception { .setSettings( Settings.builder() .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-all") - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode()) ) .setMapping(minimalMapping()) .setWaitForActiveShards(ActiveShardCount.NONE) @@ -1859,7 +1860,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .put("index.number_of_shards", 1) .put("index.number_of_replicas", 0) .put("index.routing.allocation.include._name", String.join(",", dataNodes)) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), inTimeSeriesMode()) + .put(IndexSettings.MODE.getKey(), indexMode().toString()) .build() ).setMapping(minimalMapping()) ); @@ -1928,8 +1929,8 @@ private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, } } - protected boolean inTimeSeriesMode() { - return false; + protected IndexMode indexMode() { + return IndexMode.STANDARD; } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java index 1af15deb7bb78..92469fa758607 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryInTimeSeriesModeIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.test.BackgroundIndexer; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -22,8 +23,8 @@ @ClusterScope(scope = Scope.TEST, numDataNodes = 0) public class IndexRecoveryInTimeSeriesModeIT extends IndexRecoveryIT { @Override - protected boolean inTimeSeriesMode() { - return true; + protected IndexMode indexMode() { + return IndexMode.TIME_SERIES; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index ae0bf8a935ed6..0aae4a8f58543 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -347,19 +347,11 @@ static void prohibitCustomRoutingOnDataStream(DocWriteRequest writeRequest, M } } - static void prohibitInTimeSeriesMode(DocWriteRequest writeRequest, IndexAbstraction abstraction) { + static void checkDestinationMode(DocWriteRequest writeRequest, IndexAbstraction abstraction) { if (abstraction == null || abstraction.getWriteIndex() == null) { return; } - if (abstraction.getWriteIndex().inTimeSeriesMode()) { - throw new IllegalArgumentException( - "[" - + writeRequest.opType() - + "] is not supported because the destination index [" - + abstraction.getName() - + "] is in time series mode" - ); - } + abstraction.getWriteIndex().mode().checkDocWriteRequest(writeRequest.opType(), abstraction.getName()); } boolean isOnlySystem(BulkRequest request, SortedMap indicesLookup, SystemIndices systemIndices) { @@ -463,6 +455,7 @@ protected void doRun() { switch (docWriteRequest.opType()) { case CREATE: case INDEX: + checkDestinationMode(docWriteRequest, indexAbstraction); 
prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); IndexRequest indexRequest = (IndexRequest) docWriteRequest; @@ -473,12 +466,12 @@ protected void doRun() { indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); break; case UPDATE: - prohibitInTimeSeriesMode(docWriteRequest, indexAbstraction); + checkDestinationMode(docWriteRequest, indexAbstraction); TransportUpdateAction.resolveAndValidateRouting(metadata, concreteIndex.getName(), (UpdateRequest) docWriteRequest); break; case DELETE: - prohibitInTimeSeriesMode(docWriteRequest, indexAbstraction); + checkDestinationMode(docWriteRequest, indexAbstraction); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); // check if routing is required, if so, throw error if routing wasn't specified if (docWriteRequest.routing() == null && metadata.routingRequired(concreteIndex.getName())) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 94b737d6942fb..ad2143f57c969 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -519,11 +519,11 @@ private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse private static String routing(IndexRequest request, IndexSettings settings) { // TODO remove when tsid is a native field on IndexRequest - return settings.inTimeSeriesMode() ? null : request.routing(); + return settings.mode().organizeIntoTimeSeries() ? null : request.routing(); } private static BytesReference timeSeriesId(IndexRequest request, IndexSettings settings) { // TODO remove when tsid is a native field on IndexRequest - return settings.inTimeSeriesMode() ? 
request.timeSeriesId() : null; + return settings.mode().organizeIntoTimeSeries() ? request.timeSeriesId() : null; } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index d269c0cc881d5..ba5baf6499a9d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -625,10 +625,10 @@ public void resolveRouting( ) { // TODO clean this up once we tsid is its own field String routingFromAliasOrRequest = metadata.resolveWriteIndexRouting(routing(), index); - boolean inTimeSeriesMode = abstraction == null || abstraction.getWriteIndex() == null + boolean generateTimeSeriesId = abstraction == null || abstraction.getWriteIndex() == null ? false - : abstraction.getWriteIndex().inTimeSeriesMode(); - if (inTimeSeriesMode) { + : abstraction.getWriteIndex().mode().organizeIntoTimeSeries(); + if (generateTimeSeriesId) { if (routing() != null) { throw new IllegalArgumentException( "routing cannot be set because the destination index [" + abstraction.getName() + "] is in time series mode" diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 33d168941c57d..eebf3640585f4 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import 
org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -176,11 +177,7 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< protected void shardOperation(final UpdateRequest request, final ActionListener listener, final int retryCount) { final ShardId shardId = request.getShardId(); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - if (indexService.getMetadata().inTimeSeriesMode()) { - throw new IllegalArgumentException( - "[UPDATE] is not supported because the destination index [" + shardId.getIndexName() + "] is in time series mode" - ); - } + indexService.getMetadata().mode().checkDocWriteRequest(OpType.UPDATE, shardId.getIndexName()); final IndexShard indexShard = indexService.getShard(shardId.getId()); final UpdateHelper.Result result = updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis); switch (result.getResponseResult()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 025b2233f4832..0d597b9dedb05 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -44,6 +44,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.gateway.MetadataStateFormat; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -391,7 +392,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { private final boolean isSystem; private final IndexLongFieldRange timestampRange; - private final boolean timeSeriesMode; + private final IndexMode mode; private IndexMetadata( final Index index, @@ -419,7 +420,7 @@ private IndexMetadata( 
final ImmutableOpenMap rolloverInfos, final boolean isSystem, final IndexLongFieldRange timestampRange, - final boolean inTimeSeriesMode) { + final IndexMode mode) { this.index = index; this.version = version; @@ -452,8 +453,8 @@ private IndexMetadata( this.rolloverInfos = rolloverInfos; this.isSystem = isSystem; this.timestampRange = timestampRange; - this.timeSeriesMode = inTimeSeriesMode; - assert false == (timeSeriesMode + this.mode = mode; + assert false == (mode.organizeIntoTimeSeries() && isRoutingPartitionedIndex()) : "time series indices incompatible with routing partitioned indices"; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } @@ -1288,7 +1289,7 @@ public IndexMetadata build() { } final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); - final boolean inTimeSeriesMode = IndexSettings.TIME_SERIES_MODE.get(settings); + final IndexMode mode = IndexSettings.MODE.get(settings); return new IndexMetadata( new Index(index, uuid), @@ -1316,7 +1317,7 @@ public IndexMetadata build() { rolloverInfos.build(), isSystem, timestampRange, - inTimeSeriesMode); + mode); } @SuppressWarnings("unchecked") @@ -1649,8 +1650,8 @@ public int getRoutingFactor() { return routingFactor; } - public boolean inTimeSeriesMode() { - return timeSeriesMode; + public IndexMode mode() { + return mode; } /** diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 67b99475be6c7..75cd526920222 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -184,7 +184,7 @@ private static Set> builtInIndexSettings() { return ALWAYS_ENABLED_BUILT_IN_INDEX_SETTINGS; } Set> result = new HashSet<>(ALWAYS_ENABLED_BUILT_IN_INDEX_SETTINGS); - 
result.add(IndexSettings.TIME_SERIES_MODE); + result.add(IndexSettings.MODE); return Set.copyOf(result); } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 487bacf1dd821..f9b21d833e2fa 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -1402,7 +1402,28 @@ public static ByteSizeValue parseByteSize(String s, ByteSizeValue minValue, Byte * @return the setting object */ public static > Setting enumSetting(Class clazz, String key, T defaultValue, Property... properties) { - return new Setting<>(key, defaultValue.toString(), e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), properties); + return enumSetting(clazz, key, defaultValue, s -> {}, properties); + } + + /** + * Creates a setting where the allowed values are defined as enum constants. All enum constants must be uppercase. + * + * @param clazz the enum class + * @param key the key for the setting + * @param defaultValue the default value for this setting + * @param validator validator for this setting + * @param properties properties for this setting like scope, filtering... + * @param the generics type parameter reflecting the actual type of the enum + * @return the setting object + */ + public static > Setting enumSetting( + Class clazz, + String key, + T defaultValue, + Validator validator, + Property... properties + ) { + return new Setting<>(key, defaultValue.toString(), e -> Enum.valueOf(clazz, e.toUpperCase(Locale.ROOT)), validator, properties); } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java new file mode 100644 index 0000000000000..ca137a1319ccc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteRequest.OpType; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.index.mapper.MappedFieldType; + +import java.util.List; +import java.util.Map; + +/** + * The "mode" of the index. + */ +public enum IndexMode { + /** + * Elasticsearch's traditional, search engine-like mode. + */ + STANDARD { + @Override + void validateWithOtherSettings(Map, Object> settings) {} + + @Override + public boolean organizeIntoTimeSeries() { + return false; + } + + @Override + public void checkDocWriteRequest(OpType opType, String indexName) {} + }, + /** + * A mode for time series data which automatically organizes data using + * the {@link MappedFieldType#isDimension() dimensions} to get better + * storage efficiency and enable some additional operations. 
+ */ + TIME_SERIES { + @Override + void validateWithOtherSettings(Map, Object> settings) { + if (settings.get(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING) != Integer.valueOf(1)) { + throw new IllegalArgumentException( + "[" + + IndexSettings.MODE.getKey() + + "=time_series] is incompatible with [" + + IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.getKey() + + "]" + ); + } + } + + @Override + public boolean organizeIntoTimeSeries() { + return true; + } + + @Override + public void checkDocWriteRequest(OpType opType, String indexName) { + switch (opType) { + case INDEX: + case CREATE: + return; + case DELETE: + case UPDATE: + throw new IllegalArgumentException( + "[" + opType + "] is not supported because the destination index [" + indexName + "] is in time series mode" + ); + } + } + + }; + + static final List> VALIDATE_WITH_SETTINGS = List.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING); + + abstract void validateWithOtherSettings(Map, Object> settings); + + public abstract boolean organizeIntoTimeSeries(); + + public abstract void checkDocWriteRequest(DocWriteRequest.OpType opType, String indexName); +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index c70a9bcb4ce9a..3ad69f2f6f23c 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.node.Node; @@ -337,38 +336,22 @@ public static boolean isTimeSeriesModeEnabled() { } /** - * Is the index in time series mode? 
Time series mode indices are - * automatically routed and sorted on a the - * {@link TimeSeriesIdFieldMapper _tsid} field. {@code _tsid} itself - * is automatically {@link TimeSeriesIdGenerator generated} using - * the fields marked as "dimensions". + * The {@link IndexMode "mode"} of the index. */ - public static final Setting TIME_SERIES_MODE = Setting.boolSetting( // TODO make it "mode" and force the default to "standard" - "index.time_series_mode", - false, - new Setting.Validator() { + public static final Setting MODE = Setting.enumSetting(IndexMode.class, + "index.mode", + IndexMode.STANDARD, + new Setting.Validator() { @Override - public void validate(Boolean value) {} + public void validate(IndexMode value) {} @Override - public void validate(Boolean value, Map, Object> settings) { - if (false == value) { - return; - } - if (settings.get(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING) != Integer.valueOf(1)) { - throw new IllegalArgumentException( - "[" - + TIME_SERIES_MODE.getKey() - + "] is incompatible with [" - + IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING.getKey() - + "]" - ); - } + public void validate(IndexMode value, Map, Object> settings) { + value.validateWithOtherSettings(settings); } public Iterator> settings() { - List> dependencies = List.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING); - return dependencies.iterator(); + return IndexMode.VALIDATE_WITH_SETTINGS.iterator(); } }, Property.IndexScope @@ -454,13 +437,9 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { private volatile int maxRegexLength; /** - * Is the index in time series mode? Time series mode indices are - * automatically routed and sorted on a the - * {@link TimeSeriesIdFieldMapper _tsid} field. {@code _tsid} itself - * is automatically {@link TimeSeriesIdGenerator generated} using - * the fields marked as "dimensions". + * The {@link IndexMode "mode"} of the index. 
*/ - private final boolean timeSeriesMode; + private final IndexMode mode; /** * Returns the default search fields for this index. @@ -563,7 +542,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); this.mergePolicyConfig = new MergePolicyConfig(logger, this); - timeSeriesMode = isTimeSeriesModeEnabled() ? scopedSettings.get(TIME_SERIES_MODE) : false; + mode = isTimeSeriesModeEnabled() ? scopedSettings.get(MODE) : IndexMode.STANDARD; this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); @@ -1101,7 +1080,7 @@ private void setMappingDimensionFieldsLimit(long value) { this.mappingDimensionFieldsLimit = value; } - public boolean inTimeSeriesMode() { - return timeSeriesMode; + public IndexMode mode() { + return mode; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index a529ff28e4d72..ed0d401ef3877 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -112,16 +112,16 @@ private static MultiValueMode parseMultiValueMode(String value) { final FieldSortSpec[] sortSpecs; private final Version indexCreatedVersion; private final String indexName; - private final boolean timeSeriesMode; + private final IndexMode indexMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); this.indexName = indexSettings.getIndex().getName(); - this.timeSeriesMode = indexSettings.inTimeSeriesMode(); + this.indexMode = indexSettings.mode(); List fields = INDEX_SORT_FIELD_SETTING.get(settings); - if 
(timeSeriesMode) { + if (indexMode.organizeIntoTimeSeries()) { if (false == fields.isEmpty()) { throw new IllegalArgumentException("Can't set [" + INDEX_SORT_FIELD_SETTING.getKey() + "] in time series mode"); } @@ -204,8 +204,8 @@ public Sort buildIndexSort(Function fieldTypeLookup, final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); if (ft == null) { String err = "unknown index sort field:[" + sortSpec.field + "]"; - if (timeSeriesMode) { - err += " required by [" + IndexSettings.TIME_SERIES_MODE.getKey() + "]"; + if (indexMode.organizeIntoTimeSeries()) { + err += " required by [" + IndexSettings.MODE.getKey() + "=time_series]"; } throw new IllegalArgumentException(err); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index b2db42b9da081..735e767978c58 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -25,14 +25,14 @@ public class DocumentMapper { public static DocumentMapper createEmpty(MapperService mapperService) { RootObjectMapper root = new RootObjectMapper.Builder(MapperService.SINGLE_MAPPING_NAME).build(new ContentPath(1)); MetadataFieldMapper[] metadata = mapperService.getMetadataMappers().values().toArray(new MetadataFieldMapper[0]); - Mapping mapping = new Mapping(root, metadata, null, mapperService.getIndexSettings().inTimeSeriesMode()); - return new DocumentMapper(mapperService.documentParser(), mapping, mapperService.getIndexSettings().inTimeSeriesMode()); + Mapping mapping = new Mapping(root, metadata, null, mapperService.getIndexSettings().mode()); + return new DocumentMapper(mapperService.documentParser(), mapping); } - DocumentMapper(DocumentParser documentParser, Mapping mapping, boolean inTimeSeriesMode) { + DocumentMapper(DocumentParser documentParser, Mapping mapping) { this.documentParser = 
documentParser; this.type = mapping.getRoot().name(); - this.mappingLookup = MappingLookup.fromMapping(mapping, inTimeSeriesMode); + this.mappingLookup = MappingLookup.fromMapping(mapping); this.mappingSource = mapping.toCompressedXContent(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 033761f78c58a..6bd2e3e46effd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -125,7 +125,7 @@ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, metadataMapperParsers, this::getMetadataMappers, this::resolveDocumentType, - indexSettings.inTimeSeriesMode() + indexSettings.mode() ); } @@ -289,7 +289,7 @@ private synchronized DocumentMapper mergeAndApplyMappings(String mappingType, Co } private DocumentMapper newDocumentMapper(Mapping mapping, MergeReason reason) { - DocumentMapper newMapper = new DocumentMapper(documentParser, mapping, indexSettings.inTimeSeriesMode()); + DocumentMapper newMapper = new DocumentMapper(documentParser, mapping); newMapper.mapping().getRoot().fixRedundantIncludes(); newMapper.validate(indexSettings, reason != MergeReason.MAPPING_RECOVERY); return newMapper; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 62f0ada9274d5..2dd40599af03e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.ToXContent; import 
org.elasticsearch.common.xcontent.ToXContentFragment; @@ -17,6 +18,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.mapper.MapperService.MergeReason; @@ -36,20 +38,21 @@ public final class Mapping implements ToXContentFragment { public static final Mapping EMPTY = new Mapping( - new RootObjectMapper.Builder("_doc").build(new ContentPath()), new MetadataFieldMapper[0], null, false); + new RootObjectMapper.Builder("_doc").build(new ContentPath()), new MetadataFieldMapper[0], null, IndexMode.STANDARD); private final RootObjectMapper root; private final Map meta; private final MetadataFieldMapper[] metadataMappers; private final Map, MetadataFieldMapper> metadataMappersMap; private final Map metadataMappersByName; + private final IndexMode indexMode; private final TimeSeriesIdGenerator timeSeriesIdGenerator; public Mapping( RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map meta, - boolean inTimeSeriesMode + IndexMode indexMode ) { this.metadataMappers = metadataMappers; Map, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); @@ -69,7 +72,8 @@ public int compare(Mapper o1, Mapper o2) { this.metadataMappersMap = unmodifiableMap(metadataMappersMap); this.metadataMappersByName = unmodifiableMap(metadataMappersByName); this.meta = meta; - this.timeSeriesIdGenerator = inTimeSeriesMode + this.indexMode = indexMode; + this.timeSeriesIdGenerator = indexMode.organizeIntoTimeSeries() ? TimeSeriesIdGenerator.build(root.selectTimeSeriesIdComponents()) : null; } @@ -129,7 +133,7 @@ void validate(MappingLookup mappers) { * Generate a mapping update for the given root object mapper. 
*/ Mapping mappingUpdate(RootObjectMapper rootObjectMapper) { - return new Mapping(rootObjectMapper, metadataMappers, meta, inTimeSeriesMode()); + return new Mapping(rootObjectMapper, metadataMappers, meta, indexMode); } /** @@ -170,7 +174,7 @@ Mapping merge(Mapping mergeWith, MergeReason reason) { XContentHelper.mergeDefaults(mergedMeta, meta); } - return new Mapping(mergedRoot, mergedMetadataMappers.values().toArray(new MetadataFieldMapper[0]), mergedMeta, inTimeSeriesMode()); + return new Mapping(mergedRoot, mergedMetadataMappers.values().toArray(new MetadataFieldMapper[0]), mergedMeta, indexMode); } @Override @@ -198,11 +202,14 @@ public String toString() { } } - public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { - return timeSeriesIdGenerator; + public BytesReference generateTimeSeriesIdIfNeeded(BytesReference source, XContentType xContentType) { + if (timeSeriesIdGenerator == null) { + return null; + } + return timeSeriesIdGenerator.generate(source, xContentType); } - private boolean inTimeSeriesMode() { - return timeSeriesIdGenerator != null; + public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { + return timeSeriesIdGenerator; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index e78e911730dd9..61594fa0144da 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -58,7 +58,7 @@ private CacheKey() {} * @param mapping the mapping source * @return the newly created lookup instance */ - public static MappingLookup fromMapping(Mapping mapping, boolean inTimeSeriesMode) { + public static MappingLookup fromMapping(Mapping mapping) { List newObjectMappers = new ArrayList<>(); List newFieldMappers = new ArrayList<>(); List newFieldAliasMappers = new ArrayList<>(); diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index 155bba74f3662..8d7431d8d7377 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.IndexMode; import java.util.Collections; import java.util.HashMap; @@ -30,18 +31,18 @@ public final class MappingParser { private final Supplier, MetadataFieldMapper>> metadataMappersSupplier; private final Map metadataMapperParsers; private final Function documentTypeResolver; - private final boolean inTimeSeriesMode; + private final IndexMode indexMode; MappingParser(Supplier parserContextSupplier, Map metadataMapperParsers, Supplier, MetadataFieldMapper>> metadataMappersSupplier, Function documentTypeResolver, - boolean inTimeSeriesMode) { + IndexMode indexMode) { this.parserContextSupplier = parserContextSupplier; this.metadataMapperParsers = metadataMapperParsers; this.metadataMappersSupplier = metadataMappersSupplier; this.documentTypeResolver = documentTypeResolver; - this.inTimeSeriesMode = inTimeSeriesMode; + this.indexMode = indexMode; } /** @@ -149,6 +150,6 @@ private Mapping parse(String type, Map mapping) throws MapperPar rootObjectMapper, metadataMappers.values().toArray(new MetadataFieldMapper[0]), meta, - inTimeSeriesMode); + indexMode); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java index bab89856cfc14..ee782e3f10f01 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RoutingFieldMapper.java @@ -12,6 
+12,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.query.SearchExecutionContext; import java.util.Collections; @@ -102,7 +103,7 @@ public boolean required() { @Override public void preParse(DocumentParserContext context) { String routing = context.sourceToParse().routing(); - if (context.indexSettings().inTimeSeriesMode()) { + if (context.indexSettings().mode() == IndexMode.TIME_SERIES) { // TODO when we stop storing the tsid in the routing fail any request with routing in time series mode // the routing will always come from the time series id. return; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index 528c797b3429b..ddb33e8746bd8 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -46,10 +46,15 @@ public static SourceToParse parseTimeSeriesIdFromSource( Map dynamicTemplates, MappingLookup lookup ) { - BytesReference timeSeriesId = lookup.getMapping().getTimeSeriesIdGenerator() == null - ? 
null - : lookup.getMapping().getTimeSeriesIdGenerator().generate(source, xContentType); - return new SourceToParse(index, id, source, xContentType, routing, timeSeriesId, dynamicTemplates); + return new SourceToParse( + index, + id, + source, + xContentType, + routing, + lookup.getMapping().generateTimeSeriesIdIfNeeded(source, xContentType), + dynamicTemplates + ); } private final BytesReference source; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index 9aba859e40629..b53a543510cee 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -99,7 +99,7 @@ private TimeSeriesIdFieldMapper() { @Override public void preParse(DocumentParserContext context) throws IOException { - if (false == context.indexSettings().inTimeSeriesMode()) { + if (false == context.indexSettings().mode().organizeIntoTimeSeries()) { return; } assert fieldType().isSearchable() == false; diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 9a4e2af9dcfb8..894e88583d02a 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -21,7 +21,6 @@ import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.regex.Regex; @@ -284,11 +283,15 @@ private static Fields 
generateTermVectors(IndexShard indexShard, private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException { MappingLookup mappingLookup = indexShard.mapperService().mappingLookup(); - BytesReference timeSeriesId = indexShard.mapperService().getIndexSettings().inTimeSeriesMode() - ? mappingLookup.getMapping().getTimeSeriesIdGenerator().generate(request.doc(), request.xContentType()) - : null; - SourceToParse source = new SourceToParse(indexShard.shardId().getIndexName(), "_id_for_tv_api", request.doc(), - request.xContentType(), request.routing(), timeSeriesId, Map.of()); + SourceToParse source = new SourceToParse( + indexShard.shardId().getIndexName(), + "_id_for_tv_api", + request.doc(), + request.xContentType(), + request.routing(), + mappingLookup.getMapping().generateTimeSeriesIdIfNeeded(request.doc(), request.xContentType()), + Map.of() + ); DocumentParser documentParser = indexShard.mapperService().documentParser(); ParsedDocument parsedDocument = documentParser.parseDocument(source, mappingLookup); // select the right fields and generate term vectors diff --git a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java index 8f6134d1d644c..5edf8c602cfc5 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java @@ -135,7 +135,7 @@ protected void doClose() {} @Override public TimeSeriesIdGenerator apply(IndexMetadata meta) { - if (false == meta.inTimeSeriesMode()) { + if (false == meta.mode().organizeIntoTimeSeries()) { return null; } Value v = byIndex.get(meta.getIndex()); @@ -181,7 +181,7 @@ void applyClusterState(Metadata metadata) { for (ObjectCursor cursor : metadata.indices().values()) { IndexMetadata indexMetadata = cursor.value; - if (false == 
indexMetadata.inTimeSeriesMode()) { + if (false == indexMetadata.mode().organizeIntoTimeSeries()) { continue; } Index index = indexMetadata.getIndex(); @@ -244,7 +244,7 @@ void applyClusterState(Metadata metadata) { */ for (ObjectCursor cursor : metadata.indices().values()) { IndexMetadata indexMetadata = cursor.value; - if (false == indexMetadata.inTimeSeriesMode()) { + if (false == indexMetadata.mode().organizeIntoTimeSeries()) { continue; } Index index = indexMetadata.getIndex(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java index c71e26e8d9ef6..56c5b66fb2021 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverServiceTests.java @@ -33,15 +33,16 @@ import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -581,7 +582,12 @@ protected String contentType() { RootObjectMapper.Builder root = new RootObjectMapper.Builder("_doc"); 
root.add(new DateFieldMapper.Builder(dataStream.getTimeStampField().getName(), DateFieldMapper.Resolution.MILLISECONDS, DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ScriptCompiler.NONE, true, Version.CURRENT)); - Mapping mapping = new Mapping(root.build(new ContentPath("")), metadataFieldMappers, Collections.emptyMap(), randomBoolean()); + Mapping mapping = new Mapping( + root.build(new ContentPath("")), + metadataFieldMappers, + Collections.emptyMap(), + randomFrom(IndexMode.values()) + ); MappingLookup mappingLookup = MappingLookup.fromMappers( mapping, List.of(mockedTimestampField, dateFieldMapper), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index f3a29ddbcbee8..a4b7627f209e5 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -247,28 +247,28 @@ public void testProhibitCustomRoutingOnDataStream() throws Exception { prohibitCustomRoutingOnDataStream(writeRequestAgainstIndex, metadata); } - public void testProhibitedInTimeSeriesModeWithoutATarget() throws Exception { + public void testCheckDestinationModeWithoutATarget() throws Exception { // Doesn't throw - TransportBulkAction.prohibitInTimeSeriesMode(prohibitedInTimeSeriesMode(), null); + TransportBulkAction.checkDestinationMode(prohibitedInTimeSeriesMode(), null); } - public void testProhibitedInTimeSeriesModeNotInTimeSeriesMode() throws Exception { + public void testCheckDestinationModeInStandardMode() throws Exception { Settings settings = Settings.builder().put("index.version.created", Version.CURRENT).build(); IndexMetadata writeIndex = IndexMetadata.builder("idx").settings(settings).numberOfReplicas(0).numberOfShards(1).build(); // Doesn't throw - TransportBulkAction.prohibitInTimeSeriesMode(prohibitedInTimeSeriesMode(), new 
IndexAbstraction.Index(writeIndex)); + TransportBulkAction.checkDestinationMode(prohibitedInTimeSeriesMode(), new IndexAbstraction.Index(writeIndex)); } - public void testProhibitedInTimeSeriesMode() throws Exception { + public void testCheckDestinationModeInTimeSeriesMode() throws Exception { Settings settings = Settings.builder() .put("index.version.created", Version.CURRENT) - .put(IndexSettings.TIME_SERIES_MODE.getKey(), true) + .put(IndexSettings.MODE.getKey(), "time_series") .build(); IndexMetadata writeIndex = IndexMetadata.builder("idx").settings(settings).numberOfReplicas(0).numberOfShards(1).build(); DocWriteRequest prohibited = prohibitedInTimeSeriesMode(); Exception e = expectThrows( IllegalArgumentException.class, - () -> TransportBulkAction.prohibitInTimeSeriesMode(prohibited, new IndexAbstraction.Index(writeIndex)) + () -> TransportBulkAction.checkDestinationMode(prohibited, new IndexAbstraction.Index(writeIndex)) ); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 3c19b89b23702..28efcf15ba90c 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.bulk.stats.BulkStats; @@ -224,19 +225,19 @@ public void testSkipBulkIndexRequestIfAborted() throws Exception { } public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { - boolean timeSeriesMode = randomBoolean(); + IndexMode 
mode = randomFrom(IndexMode.values()); BulkItemRequest[] items = new BulkItemRequest[1]; IndexRequest writeRequest = new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "foo", "bar"); - if (timeSeriesMode) { - writeRequest.routing("tsid"); + if (mode.organizeIntoTimeSeries()) { + writeRequest.routing("tsid"); // TODO move this into its own field in a follow up } items[0] = new BulkItemRequest(0, writeRequest); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); Engine.IndexResult mappingUpdate = new Engine.IndexResult( - new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), timeSeriesMode) + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), mode) ); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult success = new FakeIndexResult(1, 1, 13, true, resultLocation); @@ -246,7 +247,7 @@ public void testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())) .thenReturn(mappingUpdate); when(shard.mapperService()).thenReturn(mock(MapperService.class)); - when(shard.indexSettings()).thenReturn(indexSettings(timeSeriesMode)); + when(shard.indexSettings()).thenReturn(indexSettings(mode)); randomlySetIgnoredPrimaryResponse(items[0]); @@ -767,7 +768,7 @@ public void testRetries() throws Exception { "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0); Engine.IndexResult mappingUpdate = new Engine.IndexResult( - new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), randomBoolean()) + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), randomFrom(IndexMode.values())) ); Translog.Location resultLocation = new Translog.Location(42, 42, 42); Engine.IndexResult 
success = new FakeIndexResult(1, 1, 13, true, resultLocation); @@ -847,9 +848,8 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { items[1] = new BulkItemRequest(1, writeRequest2); BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); - boolean timeSeriesMode = false; Engine.IndexResult mappingUpdate = new Engine.IndexResult( - new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), timeSeriesMode) + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap(), IndexMode.STANDARD) ); Translog.Location resultLocation1 = new Translog.Location(42, 36, 36); Translog.Location resultLocation2 = new Translog.Location(42, 42, 42); @@ -862,7 +862,7 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { .thenReturn(success1, mappingUpdate, success2); when(shard.getFailedIndexResult(any(EsRejectedExecutionException.class), anyLong())).thenCallRealMethod(); when(shard.mapperService()).thenReturn(mock(MapperService.class)); - when(shard.indexSettings()).thenReturn(indexSettings(timeSeriesMode)); + when(shard.indexSettings()).thenReturn(indexSettings(IndexMode.STANDARD)); randomlySetIgnoredPrimaryResponse(items[0]); @@ -962,10 +962,10 @@ public void testPerformOnPrimaryReportsBulkStats() throws Exception { latch.await(); } - private IndexSettings indexSettings(boolean timeSeriesMode) throws IOException { + private IndexSettings indexSettings(IndexMode mode) throws IOException { return new IndexSettings( indexMetadata(), - Settings.builder().put(idxSettings).put(IndexSettings.TIME_SERIES_MODE.getKey(), timeSeriesMode).build() + Settings.builder().put(idxSettings).put(IndexSettings.MODE.getKey(), mode).build() ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java 
index 5feafb3799487..e56a93265938e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/index/MappingUpdatedActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -144,7 +145,7 @@ public void testSendUpdateMappingUsingAutoPutMappingAction() { mua.setClient(client); RootObjectMapper rootObjectMapper = new RootObjectMapper.Builder("name").build(new ContentPath()); - Mapping update = new Mapping(rootObjectMapper, new MetadataFieldMapper[0], Map.of(), randomBoolean()); + Mapping update = new Mapping(rootObjectMapper, new MetadataFieldMapper[0], Map.of(), randomFrom(IndexMode.values())); mua.sendUpdateMapping(new Index("name", "uuid"), update, ActionListener.wrap(() -> {})); verify(indicesAdminClient).execute(eq(AutoPutMappingAction.INSTANCE), any(), any()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 1cfe961e83df2..3ace502e37973 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -191,7 +191,7 @@ public void testSortingAgainstAliasesPre713() { } public void testTimeSeriesMode() { - IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build()); + IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build()); Sort sort = buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE, 
new DateFieldMapper.DateFieldType("@timestamp")); assertThat(sort.getSort(), arrayWithSize(2)); assertThat(sort.getSort()[0].getField(), equalTo("_tsid")); @@ -199,9 +199,9 @@ public void testTimeSeriesMode() { } public void testTimeSeriesModeNoTimestamp() { - IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build()); + IndexSettings indexSettings = indexSettings(Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build()); Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE)); - assertThat(e.getMessage(), equalTo("unknown index sort field:[@timestamp] required by [index.time_series_mode]")); + assertThat(e.getMessage(), equalTo("unknown index sort field:[@timestamp] required by [index.mode=time_series]")); } private Sort buildIndexSort(IndexSettings indexSettings, MappedFieldType... mfts) { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java index abb877982ad82..163ff2403b5d9 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentMapperTests.java @@ -69,7 +69,7 @@ public void testAddFields() throws Exception { assertThat(stage1.mappers().getMapper("obj1.prop1"), nullValue()); // but merged should DocumentParser documentParser = new DocumentParser(null, null, null, null); - DocumentMapper mergedMapper = new DocumentMapper(documentParser, merged, false); + DocumentMapper mergedMapper = new DocumentMapper(documentParser, merged); assertThat(mergedMapper.mappers().getMapper("age"), notNullValue()); assertThat(mergedMapper.mappers().getMapper("obj1.prop1"), notNullValue()); } @@ -332,7 +332,7 @@ public void testEmptyDocumentMapper() { public void testContainsTimeSeriesGenerator() throws IOException { 
DocumentMapper documentMapper = createMapperService( Version.CURRENT, - Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build(), () -> false, mapping(b -> b.startObject("dim").field("type", "keyword").field("dimension", true).endObject()) ).documentMapper(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 3c899d64a62ca..eed46d31aca03 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.script.ScriptCompiler; import org.elasticsearch.test.ESTestCase; @@ -202,7 +203,7 @@ private static MappingLookup createMappingLookup(List fieldMappers, builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap(), - randomBoolean() + randomFrom(IndexMode.values()) ); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, fieldAliasMappers); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java index c30d5a9871b31..be77cb1c63033 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.analysis.AnalyzerScope; import 
org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.query.SearchExecutionContext; @@ -42,7 +43,7 @@ private static MappingLookup createMappingLookup(List fieldMappers, builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap(), - randomBoolean() + randomFrom(IndexMode.values()) ); return MappingLookup.fromMappers(mapping, fieldMappers, objectMappers, emptyList()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 4786a03ec29e2..855a2186a7bea 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -13,7 +13,11 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.indices.IndicesModule; @@ -26,11 +30,16 @@ import java.util.Map; import java.util.function.Supplier; +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + public class MappingParserTests extends MapperServiceTestCase { - private static MappingParser createMappingParser(Settings settings) { - ScriptService scriptService = new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap()); - IndexSettings indexSettings = 
createIndexSettings(Version.CURRENT, settings); + private static MappingParser createMappingParser(IndexMode indexMode) { + ScriptService scriptService = new ScriptService(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap()); + IndexSettings indexSettings = createIndexSettings(Version.CURRENT, Settings.EMPTY); IndexAnalyzers indexAnalyzers = createIndexAnalyzers(); SimilarityService similarityService = new SimilarityService(indexSettings, scriptService, Collections.emptyMap()); MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); @@ -45,7 +54,7 @@ private static MappingParser createMappingParser(Settings settings) { metadataMapperParsers.values().stream().map(parser -> parser.getDefault(parserContextSupplier.get())) .forEach(m -> metadataMappers.put(m.getClass(), m)); return new MappingParser(parserContextSupplier, metadataMapperParsers, - () -> metadataMappers, type -> MapperService.SINGLE_MAPPING_NAME, randomBoolean()); + () -> metadataMappers, type -> MapperService.SINGLE_MAPPING_NAME, indexMode); } public void testFieldNameWithDots() throws Exception { @@ -53,7 +62,10 @@ public void testFieldNameWithDots() throws Exception { b.startObject("foo.bar").field("type", "text").endObject(); b.startObject("foo.baz").field("type", "keyword").endObject(); }); - Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder))); + Mapping mapping = createMappingParser(randomFrom(IndexMode.values())).parse( + "_doc", + new CompressedXContent(BytesReference.bytes(builder)) + ); Mapper object = mapping.getRoot().getMapper("foo"); assertThat(object, CoreMatchers.instanceOf(ObjectMapper.class)); @@ -62,6 +74,25 @@ public void testFieldNameWithDots() throws Exception { assertNotNull(objectMapper.getMapper("baz")); } + public void testTimeSeriesMode() throws Exception { + XContentBuilder builder = mapping(b -> { + b.startObject("@timestamp").field("type", 
"date").endObject(); + b.startObject("dim").field("type", "keyword").field("dimension", true).endObject(); + b.startObject("v").field("type", "double").endObject(); + }); + Mapping mapping = createMappingParser(IndexMode.TIME_SERIES).parse("_doc", new CompressedXContent(BytesReference.bytes(builder))); + assertThat(mapping.getTimeSeriesIdGenerator(), not(nullValue())); + XContentBuilder doc = JsonXContent.contentBuilder().startObject(); + doc.field("@timestamp", "2021-01-01T00:00:00Z"); + doc.field("dim", "rat"); + doc.field("v", 1.2); + doc.endObject(); + assertMap( + TimeSeriesIdGenerator.parse(mapping.generateTimeSeriesIdIfNeeded(BytesReference.bytes(doc), XContentType.JSON).streamInput()), + matchesMap().entry("dim", "rat") + ); + } + public void testFieldNameWithDeepDots() throws Exception { XContentBuilder builder = mapping(b -> { b.startObject("foo.bar").field("type", "text").endObject(); @@ -75,8 +106,11 @@ public void testFieldNameWithDeepDots() throws Exception { } b.endObject(); }); - Mapping mapping = createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder))); - MappingLookup mappingLookup = MappingLookup.fromMapping(mapping, randomBoolean()); + Mapping mapping = createMappingParser(randomFrom(IndexMode.values())).parse( + "_doc", + new CompressedXContent(BytesReference.bytes(builder)) + ); + MappingLookup mappingLookup = MappingLookup.fromMapping(mapping); assertNotNull(mappingLookup.getMapper("foo.bar")); assertNotNull(mappingLookup.getMapper("foo.baz.deep.field")); assertNotNull(mappingLookup.objectMappers().get("foo")); @@ -88,7 +122,7 @@ public void testFieldNameWithDotsConflict() throws IOException { b.startObject("foo.baz").field("type", "keyword").endObject(); }); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder)))); + () -> 
createMappingParser(randomFrom(IndexMode.values())).parse("_doc", new CompressedXContent(BytesReference.bytes(builder)))); assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [text] to [ObjectMapper]")); } @@ -112,7 +146,7 @@ public void testMultiFieldsWithFieldAlias() throws IOException { b.startObject("other-field").field("type", "keyword").endObject(); }); MapperParsingException e = expectThrows(MapperParsingException.class, - () -> createMappingParser(Settings.EMPTY).parse("_doc", new CompressedXContent(BytesReference.bytes(builder)))); + () -> createMappingParser(randomFrom(IndexMode.values())).parse("_doc", new CompressedXContent(BytesReference.bytes(builder)))); assertEquals("Type [alias] cannot be used in multi field", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java index 22c419f7f4769..dd66a02d7b4c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java @@ -37,7 +37,7 @@ protected void registerParameters(ParameterChecker checker) throws IOException { public void testEnabledInTimeSeriesMode() throws Exception { DocumentMapper docMapper = createMapperService( - getIndexSettingsBuilder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), "time_series").build(), mapping(b -> {}) ).documentMapper(); @@ -52,9 +52,9 @@ public void testEnabledInTimeSeriesMode() throws Exception { assertThat(doc.rootDoc().get("field"), equalTo("value")); } - public void testDisabledOutsideOfTimeSeriesMode() throws Exception { + public void testDisabledInStandardMode() throws Exception { DocumentMapper docMapper = createMapperService( - 
getIndexSettingsBuilder().put(IndexSettings.TIME_SERIES_MODE.getKey(), false).build(), + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), "standard").build(), mapping(b -> {}) ).documentMapper(); diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 32e8a9076c26d..a42935f156c3b 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -331,7 +332,7 @@ private static MappingLookup createMappingLookup(List concreteF builder.build(new ContentPath()), new MetadataFieldMapper[0], Collections.emptyMap(), - randomBoolean() + randomFrom(IndexMode.values()) ); return MappingLookup.fromMappers(mapping, mappers, Collections.emptyList(), Collections.emptyList()); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 7c568f8390150..e80e393b10f40 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -273,7 +273,7 @@ private IndexShard setupTimeSeriesShard() throws IOException { Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - 
.put(IndexSettings.TIME_SERIES_MODE.getKey(), true) + .put(IndexSettings.MODE.getKey(), "time_series") .build(); XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); { diff --git a/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java index 3e8bf2e9911b1..396f59cb3c4ea 100644 --- a/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/TimeSeriesIdGeneratorServiceTests.java @@ -281,7 +281,7 @@ private TimeSeriesIdGeneratorService genService( private IndexMetadata.Builder index(String index, boolean timeSeriesMode, String mapping) { Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT); if (timeSeriesMode) { - settings.put(IndexSettings.TIME_SERIES_MODE.getKey(), true); + settings.put(IndexSettings.MODE.getKey(), "time_series"); } IndexMetadata.Builder builder = IndexMetadata.builder(index) .settings(settings) diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 497e2cb7d17bc..8b4c4495dda45 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -465,7 +465,7 @@ protected IndexShard newStartedShard() throws IOException { protected IndexShard newStartedTimeSeriesShard(boolean primary) throws IOException { return newStartedShard( primary, - Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build(), new InternalEngineFactory() ); } diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java 
b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index 88869c8c2f94f..b059601e7155f 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -247,7 +247,7 @@ public void testFollowTsdbIndex() throws Exception { logger.info("Running against leader cluster"); createIndex( leaderIndexName, - Settings.builder().put(IndexSettings.TIME_SERIES_MODE.getKey(), true).build(), + Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build(), "\"properties\": {\"@timestamp\": {\"type\": \"date\"}, \"dim\": {\"type\": \"keyword\", \"dimension\": true}}" ); for (int i = 0; i < numDocs; i++) { @@ -284,12 +284,12 @@ public void testFollowTsdbIndex() throws Exception { if (overrideNumberOfReplicas) { assertMap( getIndexSettingsAsMap(followIndexName), - matchesMap().extraOk().entry("index.time_series_mode", "true").entry("index.number_of_replicas", "0") + matchesMap().extraOk().entry("index.mode", "time_series").entry("index.number_of_replicas", "0") ); } else { assertMap( getIndexSettingsAsMap(followIndexName), - matchesMap().extraOk().entry("index.time_series_mode", "true").entry("index.number_of_replicas", "1") + matchesMap().extraOk().entry("index.mode", "time_series").entry("index.number_of_replicas", "1") ); } }); @@ -347,11 +347,11 @@ public void testFollowTsdbIndexCanNotOverrideMode() throws Exception { "leader_cluster", "tsdb_leader", "tsdb_follower_bad", - Settings.builder().put("index.time_series_mode", false).build() + Settings.builder().put("index.mode", "standard").build() )); assertThat( e.getMessage(), - containsString("can not put follower index that could override leader settings {\\\"index.time_series_mode\\\":\\\"false\\\"}") + containsString("can not put follower index that could override leader settings {\\\"index.mode\\\":\\\"time_series\\\"}") 
); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml index 5db603c04c168..786a934b31abb 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml @@ -5,7 +5,7 @@ setup: body: settings: index: - time_series_mode: true + mode: time_series mappings: properties: "@timestamp": From 41bbf09c41fdf7da1c102a310594142ebdb05428 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 2 Aug 2021 17:46:26 -0400 Subject: [PATCH 09/29] Better names --- .../rest-api-spec/test/tsdb/20_bad_config.yml | 2 +- .../indices/TimeSeriesIdGeneratorService.java | 76 +++++++++++-------- 2 files changed, 46 insertions(+), 32 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml index b157cec072456..6be229ce5bb91 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_bad_config.yml @@ -107,7 +107,7 @@ reason: introduced in 8.0.0 to be backported to 7.15.0 - do: - catch: /\[index.mode\] is incompatible with \[index.routing_partition_size\]/ + catch: /\[index.mode=time_series\] is incompatible with \[index.routing_partition_size\]/ indices.create: index: test body: diff --git a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java index 5edf8c602cfc5..d9e36df712957 100644 --- a/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java +++ b/server/src/main/java/org/elasticsearch/indices/TimeSeriesIdGeneratorService.java @@ -62,9 +62,9 @@ public 
interface LocalIndex { } private final Function lookupLocalIndex; - private final Function buildTimeSeriedIdGenerator; + private final Function buildTimeSeriesIdGenerator; private final ExecutorService executor; // single thread to construct mapper services async as needed - private final Map byIndex = ConcurrentCollections.newConcurrentMap(); + private final Map byIndex = ConcurrentCollections.newConcurrentMap(); static TimeSeriesIdGeneratorService build(Settings nodeSettings, ThreadPool threadPool, IndicesService indicesService) { String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(nodeSettings)); @@ -119,7 +119,7 @@ public TimeSeriesIdGenerator generator() { ) { this.executor = executor; this.lookupLocalIndex = lookupLocalIndex; - this.buildTimeSeriedIdGenerator = buildTimeSeriedIdGenerator; + this.buildTimeSeriesIdGenerator = buildTimeSeriedIdGenerator; } @Override @@ -138,7 +138,7 @@ public TimeSeriesIdGenerator apply(IndexMetadata meta) { if (false == meta.mode().organizeIntoTimeSeries()) { return null; } - Value v = byIndex.get(meta.getIndex()); + IdGeneratorProvider v = byIndex.get(meta.getIndex()); /* * v is rebuilt in applyClusterState which should have happened-before * whatever made meta available to the rest of the system. 
So the if @@ -177,7 +177,7 @@ void applyClusterState(Metadata metadata) { */ byIndex.keySet().removeIf(index -> metadata.index(index) == null); - Map dedupe = new HashMap<>(); + Map dedupe = new HashMap<>(); for (ObjectCursor cursor : metadata.indices().values()) { IndexMetadata indexMetadata = cursor.value; @@ -187,7 +187,7 @@ void applyClusterState(Metadata metadata) { Index index = indexMetadata.getIndex(); if (indexMetadata.mapping() == null) { - byIndex.put(index, new PreBuiltValue(indexMetadata.getMappingVersion(), TimeSeriesIdGenerator.EMPTY)); + byIndex.put(index, new PreBuiltIdGeneratorProvider(indexMetadata.getMappingVersion(), TimeSeriesIdGenerator.EMPTY)); continue; } @@ -196,7 +196,7 @@ void applyClusterState(Metadata metadata) { /* * Find indices who's mapping hasn't changed. */ - Value old = byIndex.get(index); + IdGeneratorProvider old = byIndex.get(index); if (old != null && old.mappingVersion == indexMetadata.getMappingVersion()) { logger.trace("reusing previous timeseries id generator for {}", index); dedupe.put(key, old); @@ -206,10 +206,10 @@ void applyClusterState(Metadata metadata) { /* * Check if the mapping is the same as something we've already seen. 
*/ - Value value = dedupe.get(key); - if (value != null) { + IdGeneratorProvider provider = dedupe.get(key); + if (provider != null) { logger.trace("reusing timeseries id from another index for {}", index); - byIndex.put(index, value.withMappingVersion(indexMetadata.getMappingVersion())); + byIndex.put(index, provider.withMappingVersion(indexMetadata.getMappingVersion())); continue; } @@ -234,9 +234,9 @@ void applyClusterState(Metadata metadata) { + "]" ); } - value = new PreBuiltValue(indexMetadata.getMappingVersion(), localIndex.generator()); - byIndex.put(index, value); - dedupe.put(key, value); + provider = new PreBuiltIdGeneratorProvider(indexMetadata.getMappingVersion(), localIndex.generator()); + byIndex.put(index, provider); + dedupe.put(key, provider); } /* @@ -249,41 +249,55 @@ void applyClusterState(Metadata metadata) { } Index index = indexMetadata.getIndex(); - Value old = byIndex.get(index); + IdGeneratorProvider old = byIndex.get(index); if (old != null && old.mappingVersion == indexMetadata.getMappingVersion()) { // We already updated the generator in the first pass continue; } DedupeKey key = new DedupeKey(indexMetadata); - Value value = dedupe.get(key); - if (value == null) { + IdGeneratorProvider provider = dedupe.get(key); + if (provider == null) { logger.trace("computing timeseries id generator for {} async", index); - value = new AsyncValue(indexMetadata.getMappingVersion(), buildTimeSeriedIdGenerator, executor, indexMetadata); + provider = new AsyncIdGeneratorProvider( + indexMetadata.getMappingVersion(), + buildTimeSeriesIdGenerator, + executor, + indexMetadata + ); } else { logger.trace("reusing timeseries id from another index for {}", index); - value = value.withMappingVersion(indexMetadata.getMappingVersion()); + provider = provider.withMappingVersion(indexMetadata.getMappingVersion()); } - byIndex.put(index, value); + byIndex.put(index, provider); } } - private abstract static class Value { + /** + * Provides {@link 
TimeSeriesIdGenerator}s either + * {@link PreBuiltIdGeneratorProvider immediately} or + * {@link AsyncIdGeneratorProvider asynchronously}. + */ + private abstract static class IdGeneratorProvider { private final long mappingVersion; - protected Value(long mappingVersion) { + protected IdGeneratorProvider(long mappingVersion) { this.mappingVersion = mappingVersion; } abstract TimeSeriesIdGenerator generator(); - abstract Value withMappingVersion(long newMappingVersion); + abstract IdGeneratorProvider withMappingVersion(long newMappingVersion); } - private static class PreBuiltValue extends Value { + /** + * Provides {@link TimeSeriesIdGenerator}s for local indices or indices + * that have the same mapping as a local index. + */ + private static class PreBuiltIdGeneratorProvider extends IdGeneratorProvider { private final TimeSeriesIdGenerator generator; - PreBuiltValue(long mappingVersion, TimeSeriesIdGenerator generator) { + PreBuiltIdGeneratorProvider(long mappingVersion, TimeSeriesIdGenerator generator) { super(mappingVersion); this.generator = generator; } @@ -294,8 +308,8 @@ TimeSeriesIdGenerator generator() { } @Override - Value withMappingVersion(long newMappingVersion) { - return new PreBuiltValue(newMappingVersion, generator); + IdGeneratorProvider withMappingVersion(long newMappingVersion) { + return new PreBuiltIdGeneratorProvider(newMappingVersion, generator); } } @@ -307,15 +321,15 @@ Value withMappingVersion(long newMappingVersion) { * build the {@link TimeSeriesIdGenerator} and if they lose they'll return * a cached copy.
*/ - private static class AsyncValue extends Value { + private static class AsyncIdGeneratorProvider extends IdGeneratorProvider { private final LazyInitializable lazy; - private AsyncValue(long mappingVersion, LazyInitializable lazy) { + private AsyncIdGeneratorProvider(long mappingVersion, LazyInitializable lazy) { super(mappingVersion); this.lazy = lazy; } - AsyncValue( + AsyncIdGeneratorProvider( long mappingVersion, Function buildTimeSeriesIdGenerator, ExecutorService executor, @@ -356,8 +370,8 @@ TimeSeriesIdGenerator generator() { } @Override - Value withMappingVersion(long newMappingVersion) { - return new AsyncValue(newMappingVersion, lazy); + IdGeneratorProvider withMappingVersion(long newMappingVersion) { + return new AsyncIdGeneratorProvider(newMappingVersion, lazy); } } From 500ec306608ead0708e5223da50362a8ba9eeae5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 2 Aug 2021 17:48:29 -0400 Subject: [PATCH 10/29] fixup --- .../java/org/elasticsearch/index/TimeSeriesIdGenerator.java | 2 +- .../main/java/org/elasticsearch/index/mapper/IpFieldMapper.java | 1 + .../java/org/elasticsearch/index/mapper/KeywordFieldMapper.java | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java index d94686e7b384a..de973e13c6ab2 100644 --- a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java +++ b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -205,7 +205,7 @@ public BytesReference generate(XContentParser parser) throws IOException { Collections.sort(dimensionNames); throw new IllegalArgumentException("Document must contain one of the dimensions " + dimensionNames); } - Collections.sort(values, Comparator.comparing(Map.Entry::getKey)); + Collections.sort(values, Map.Entry.comparingByKey()); try (BytesStreamOutput out = new BytesStreamOutput()) { 
out.writeVInt(values.size()); for (Map.Entry> v : values) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index bf9dc447365bf..d64c228043843 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -520,6 +520,7 @@ public static TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(InetAddr } return new IpTsidGen(nullValue); } + private static class IpTsidGen extends TimeSeriesIdGenerator.StringLeaf { private static final IpTsidGen DEFAULT = new IpTsidGen(null); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 1ac661f271a56..e5c50dceba284 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -600,6 +600,7 @@ public static TimeSeriesIdGenerator.LeafComponent timeSeriesIdGenerator(String n } return new KeywordTsidGen(nullValue); } + private static class KeywordTsidGen extends TimeSeriesIdGenerator.StringLeaf { private static final KeywordTsidGen DEFAULT = new KeywordTsidGen(null); From 9075da42cda8643f6da3cd697703d3fc49928bd3 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 3 Aug 2021 09:44:07 -0400 Subject: [PATCH 11/29] Cleanup --- .../resources/rest-api-spec/test/tsdb/90_data_stream.yml | 0 .../main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java | 1 - 2 files changed, 1 deletion(-) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml new file mode 100644 index 
0000000000000..e69de29bb2d1d diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java index de973e13c6ab2..a9fded29960bb 100644 --- a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java +++ b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; From 424ff619482f067b90810636d36b8c9c5462906c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 3 Aug 2021 09:56:33 -0400 Subject: [PATCH 12/29] data stream test --- .../test/tsdb/40_invalid_indexing.yml | 1 - .../test/tsdb/90_data_stream.yml | 0 .../test/data_stream/150_tsdb.yml | 125 ++++++++++++++++++ 3 files changed, 125 insertions(+), 1 deletion(-) delete mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml index 50dba96504030..63716423913eb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -204,6 +204,5 @@ setup: - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {items.0.index.error.reason: "alias routing incompatible the destination index [test] because it is in time series mode"} -# TODO should indexing with an id 
fail too? # TODO should indexing without a @timestamp fail too? # TODO should an alias with search_routing fail? diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/90_data_stream.yml deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml new file mode 100644 index 0000000000000..42cca9cca1178 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -0,0 +1,125 @@ +setup: + - skip: + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [my-template1] has index patterns [k8s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" + indices.put_index_template: + name: my-template1 + body: + index_patterns: [k8s*] + data_stream: {} + template: + settings: + index.number_of_replicas: 0 + mode: time_series + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + + - do: + bulk: + refresh: true + index: k8s + body: + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network":
{"tx": 2005177954, "rx": 801479970}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"create": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +created the data stream: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.get_data_stream: + name: '*' + - length: { data_streams: 1 } + - match: { data_streams.0.name: 'k8s' } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-k8s-\d{4}\.\d{2}\.\d{2}-000001/' } + - match: { data_streams.0.status: 'GREEN' } + 
- match: { data_streams.0.template: 'my-template1' } + - match: { data_streams.0.hidden: false } + - match: { data_streams.0.system: false } + +--- +fetch the tsid: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: k8s + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} + +--- +"aggregate the tsid": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + search: + index: k8s + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} From b5f7fbbdfa209af275b51de5bf16c7338209eb13 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 3 Aug 2021 14:10:19 -0400 Subject: [PATCH 13/29] snapshot/restore --- plugins/repository-hdfs/build.gradle | 2 +- .../test/hdfs_repository/50_tsdb.yml | 148 ++++++++++++++++++ 2 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/50_tsdb.yml diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 3ae7b83d34f0c..521b20ccf5211 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -76,7 +76,7 @@ dependencies { restResources { restApi { - include '_common', 'cluster', 'nodes', 'indices', 'index', 'snapshot' + include '_common', 'bulk', 'cluster', 
'index', 'indices', 'nodes', 'search', 'snapshot' } } diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/50_tsdb.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/50_tsdb.yml new file mode 100644 index 0000000000000..65040658a607e --- /dev/null +++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/50_tsdb.yml @@ -0,0 +1,148 @@ +--- +"Create a snapshot and then restore it": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + features: ["allowed_warnings"] + + # Create repository + - do: + snapshot.create_repository: + repository: test_restore_repository + body: + type: hdfs + settings: + uri: "hdfs://localhost:9999" + path: "test/restore" + + # Create index + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test_index + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 
802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": 
{"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434595272, "rx": 530605511}}}}' + + + # Wait for green + - do: + cluster.health: + wait_for_status: green + + # Take snapshot + - do: + snapshot.create: + repository: test_restore_repository + snapshot: test_restore_tsdb + wait_for_completion: true + + - match: { snapshot.snapshot: test_restore_tsdb } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 2 } + - match: { snapshot.shards.failed : 0 } + - is_true: snapshot.version + - gt: { snapshot.version_id: 0} + + # Close index + - do: + indices.close: + index : test_index + allowed_warnings: + - "the default value for the ?wait_for_active_shards parameter will change from '0' to 'index-setting' in version 8; specify '?wait_for_active_shards=index-setting' to adopt the future default behaviour, or '?wait_for_active_shards=0' to preserve today's behaviour" + + # Restore index + - do: + snapshot.restore: + repository: test_restore_repository + snapshot: test_restore_tsdb + wait_for_completion: true + + # Check recovery stats + - do: + indices.recovery: + index: test_index + + - match: { test_index.shards.0.type: SNAPSHOT } + - match: { test_index.shards.0.stage: DONE } + - match: { test_index.shards.0.index.files.recovered: 1} + - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0} + + # Remove our snapshot + - do: + snapshot.delete: + repository: test_restore_repository + snapshot: test_restore_tsdb + + # Remove our repository + - do: + snapshot.delete_repository: + repository: test_restore_repository + + - do: + search: + index: test_index + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} From c6c454e6559d7a3d5cdf34183382f37268d72875 Mon Sep 17 00:00:00 
2001 From: Nik Everett Date: Tue, 3 Aug 2021 15:14:46 -0400 Subject: [PATCH 14/29] Alias tests --- .../rest-api-spec/test/tsdb/25_alias.yml | 170 ++++++++++++++++++ .../test/tsdb/40_invalid_indexing.yml | 50 ------ .../cluster/metadata/AliasAction.java | 6 + .../MetadataIndexAliasesServiceTests.java | 30 +++- 4 files changed, 205 insertions(+), 51 deletions(-) create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_alias.yml diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_alias.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_alias.yml new file mode 100644 index 0000000000000..d19ca3a321f62 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/25_alias.yml @@ -0,0 +1,170 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}}' + - '{"index": {}}' + - '{"@timestamp": 
"2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' + +--- +search an alias: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.put_alias: + index: test + name: test_alias + + - do: + search: + index: test_alias + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + +--- +index into alias: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.put_alias: + index: test + name: test_alias + + 
- do: + bulk: + refresh: true + index: test_alias + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434521831, "rx": 530575198}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434577921, "rx": 530600088}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434587694, "rx": 530604797}}}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434595272, "rx": 530605511}}}}' + - match: {errors: false} + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 12} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - match: {aggregations.tsids.buckets.2.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.2.doc_count: 4} + +--- +alias with routing fails: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[test\] is in time series mode which is incompatible with routing on aliases/ + indices.put_alias: + index: test + name: test_alias + body: + routing: cat + +--- 
+alias with index routing fails: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[test\] is in time series mode which is incompatible with routing on aliases/ + indices.put_alias: + index: test + name: test_alias + body: + index_routing: cat + +--- +alias with search routing fails: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + catch: /\[test\] is in time series mode which is incompatible with routing on aliases/ + indices.put_alias: + index: test + name: test_alias + body: + search_routing: cat diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml index 63716423913eb..9d2188fb8c4cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -154,55 +154,5 @@ setup: - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {items.0.index.error.reason: "routing cannot be set because the destination index [test] is in time series mode"} ---- -"index into routed alias": - - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.15.0 - - - do: - indices.put_alias: - index: test - name: alias_with_routing - body: - index_routing: ir - - - do: - catch: /alias routing incompatible the destination index \[test\] because it is in time series mode/ - index: - index: alias_with_routing - body: - "@timestamp": "2021-04-28T18:35:24.467Z" - metricset: "pod" - k8s: - pod: - name: "cat" - uid: "947e4ced-1786-4e53-9e0c-5c447e959507" - ip: "10.10.55.1" - network: - tx: 2001818691 - rx: 802133794 - ---- 
-"index over bulk into routed alias": - - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 to be backported to 7.15.0 - - - do: - indices.put_alias: - index: test - name: alias_with_routing - body: - index_routing: ir - - - do: - bulk: - index: alias_with_routing - body: - - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - - match: {items.0.index.error.reason: "alias routing incompatible the destination index [test] because it is in time series mode"} - # TODO should indexing without a @timestamp fail too? # TODO should an alias with search_routing fail? diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index 9f289ef65ce48..630c1b56c15a0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -119,6 +119,12 @@ boolean removeIndex() { boolean apply(NewAliasValidator aliasValidator, Metadata.Builder metadata, IndexMetadata index) { aliasValidator.validate(alias, indexRouting, filter, writeIndex); + if (index.mode().organizeIntoTimeSeries() && (indexRouting != null || searchRouting != null)) { + throw new IllegalArgumentException( + "[" + index.getIndex().getName() + "] is in time series mode which is incompatible with routing on aliases" + ); + } + AliasMetadata newAliasMd = AliasMetadata.newAliasMetadataBuilder(alias).filter(filter).indexRouting(indexRouting) .searchRouting(searchRouting).writeIndex(writeIndex).isHidden(isHidden).build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index 
17b0c83e493e6..f2b417f006184 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -12,11 +12,12 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -561,6 +562,20 @@ public void testDataStreamAliasesWithWriteFlag() { assertThat(result.metadata().dataStreamAliases().get("logs-http"), nullValue()); } + public void testIndexRoutingInTimeSeriesMode() { + ClusterState before = createTimeSeriesIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test"); + AliasAction add = new AliasAction.Add("test", "alias", null, "index_routing", null, false, false); + Exception e = expectThrows(IllegalArgumentException.class, () -> service.applyAliasActions(before, List.of(add))); + assertThat(e.getMessage(), equalTo("[test] is in time series mode which is incompatible with routing on aliases")); + } + + public void testSearchRoutingInTimeSeriesMode() { + ClusterState before = createTimeSeriesIndex(ClusterState.builder(ClusterName.DEFAULT).build(), "test"); + AliasAction add = new AliasAction.Add("test", "alias", null, "search_routing", null, false, false); + Exception e = expectThrows(IllegalArgumentException.class, () -> service.applyAliasActions(before, List.of(add))); + assertThat(e.getMessage(), equalTo("[test] is in time series mode which is incompatible with routing on aliases")); + } + private ClusterState applyHiddenAliasMix(ClusterState before, 
Boolean isHidden1, Boolean isHidden2) { return service.applyAliasActions(before, Arrays.asList( new AliasAction.Add("test", "alias", null, null, null, null, isHidden1), @@ -579,6 +594,19 @@ private ClusterState createIndex(ClusterState state, String index) { .build(); } + private ClusterState createTimeSeriesIndex(ClusterState state, String index) { + IndexMetadata indexMetadata = IndexMetadata.builder(index) + .settings( + Settings.builder() + .put("index.version.created", VersionUtils.randomVersion(random())) + .put(IndexSettings.MODE.getKey(), "time_series") + ) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + return ClusterState.builder(state).metadata(Metadata.builder(state.metadata()).put(indexMetadata, false)).build(); + } + private void assertAliasesVersionUnchanged(final String index, final ClusterState before, final ClusterState after) { assertAliasesVersionUnchanged(new String[]{index}, before, after); } From 8f6e2bccc4fd53a00397798045e109e6abdf2aa4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Aug 2021 12:06:36 -0400 Subject: [PATCH 15/29] Remove todo --- .../resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml index 9d2188fb8c4cb..71b0394da80f6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -155,4 +155,3 @@ setup: - match: {items.0.index.error.reason: "routing cannot be set because the destination index [test] is in time series mode"} # TODO should indexing without a @timestamp fail too? -# TODO should an alias with search_routing fail? 
From 7538327eaab8f4a0a0381f9dfb7d4339ce103953 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Thu, 5 Aug 2021 12:35:47 -0400 Subject: [PATCH 16/29] Moar test --- .../xpack/ccr/FollowIndexIT.java | 20 +++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index b059601e7155f..4876a5d4dcf01 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -351,11 +351,27 @@ public void testFollowTsdbIndexCanNotOverrideMode() throws Exception { )); assertThat( e.getMessage(), - containsString("can not put follower index that could override leader settings {\\\"index.mode\\\":\\\"time_series\\\"}") + containsString("can not put follower index that could override leader settings {\\\"index.mode\\\":\\\"standard\\\"}") ); } - // TODO can't override tsdb mode setting + public void testFollowStandardIndexCanNotOverrideMode() throws Exception { + if (false == "follow".equals(targetCluster)) { + return; + } + logger.info("Running against follow cluster"); + Exception e = expectThrows(ResponseException.class, () -> followIndex( + client(), + "leader_cluster", + "test_index1", + "tsdb_follower_bad", + Settings.builder().put("index.mode", "time_series").build() + )); + assertThat( + e.getMessage(), + containsString("can not put follower index that could override leader settings {\\\"index.mode\\\":\\\"time_series\\\"}") + ); + } @Override protected Settings restClientSettings() { From c11b53c7b395b58ca2f9110135350c8a02c123ca Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Aug 2021 09:26:39 -0400 Subject: [PATCH 17/29] WIP --- .../test/tsdb/40_invalid_indexing.yml | 1 - 
.../test/tsdb/50_add_missing_dimensions.yml | 67 ++++++++++++++++++- 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml index 71b0394da80f6..bacd289bf114a 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_invalid_indexing.yml @@ -154,4 +154,3 @@ setup: - '{"@timestamp": "2021-04-28T18:03:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {items.0.index.error.reason: "routing cannot be set because the destination index [test] is in time series mode"} -# TODO should indexing without a @timestamp fail too? diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml index 8e28ad47be369..a47b65abc27f6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -1,5 +1,5 @@ --- -"add dimensions after creation": +"add dimensions with put_mapping": - skip: version: " - 7.99.99" reason: introduced in 8.0.0 to be backported to 7.15.0 @@ -53,3 +53,68 @@ - match: {hits.total.value: 1} - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} + + + + +--- +"add dimensions with dynamic_template": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series 
+ dynaimc_template: + keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true + mappings: + properties: + "@timestamp": + type: date + + - do: + catch: "/Error building time series id: There aren't any mapped dimensions/" + index: + index: test + refresh: true + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: cat + + - do: + indices.put_mapping: + index: test + body: + properties: + metricset: + type: keyword + dimension: true + + - do: + index: + index: test + refresh: true + body: + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: cat + + - do: + search: + index: test + body: + fields: + - field: _tsid + - field: "@timestamp" + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} + - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} From e7811e3161861f09723f360590e6f4f3045ad5e4 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Aug 2021 11:19:49 -0400 Subject: [PATCH 18/29] WIP --- .../test/tsdb/50_add_missing_dimensions.yml | 60 +++++++++++++++---- 1 file changed, 50 insertions(+), 10 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml index a47b65abc27f6..a9f26a57bc976 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -54,11 +54,8 @@ - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} - - - --- -"add dimensions with dynamic_template": +"add dimensions with dynamic_template over index": - skip: version: " - 7.99.99" reason: introduced in 8.0.0 to be backported to 7.15.0 @@ -70,7 +67,7 @@ settings: index: mode: time_series - dynaimc_template: + 
dynamic_templates: keywords: match_mapping_type: string mapping: @@ -91,15 +88,51 @@ metricset: cat - do: - indices.put_mapping: + index: index: test + refresh: true body: - properties: - metricset: - type: keyword - dimension: true + "@timestamp": "2021-04-28T18:35:24.467Z" + metricset: cat - do: + search: + index: test + body: + fields: + - field: _tsid + - field: "@timestamp" + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} + - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} + +--- +"add dimensions with dynamic_template over bulk": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + dynamic_templates: + keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true + mappings: + properties: + "@timestamp": + type: date + + - do: + catch: "/Error building time series id: There aren't any mapped dimensions/" index: index: test refresh: true @@ -107,6 +140,13 @@ "@timestamp": "2021-04-28T18:35:24.467Z" metricset: cat + - do: + bulk: + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat"}' + - do: search: index: test From b0e734f58fe0471f5fb8f9006473a4835209cab8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 9 Aug 2021 13:47:37 -0400 Subject: [PATCH 19/29] WIP --- .../test/tsdb/50_add_missing_dimensions.yml | 124 ++++++++++++++---- .../index/mapper/DocumentParser.java | 6 +- 2 files changed, 106 insertions(+), 24 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml index a9f26a57bc976..dd9b2c57ceacf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -55,7 +55,7 @@ - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- -"add dimensions with dynamic_template over index": +"add dimensions to no dims with dynamic_template over index": - skip: version: " - 7.99.99" reason: introduced in 8.0.0 to be backported to 7.15.0 @@ -67,33 +67,69 @@ settings: index: mode: time_series - dynamic_templates: - keywords: - match_mapping_type: string - mapping: - type: keyword - dimension: true mappings: + dynamic_templates: + - keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true properties: "@timestamp": type: date - do: - catch: "/Error building time series id: There aren't any mapped dimensions/" index: index: test refresh: true body: "@timestamp": "2021-04-28T18:35:24.467Z" metricset: cat + - match: {result: created} - do: - index: + search: + index: test + body: + fields: + - field: _tsid + - field: "@timestamp" + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} + - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} + +--- +"add dimensions to no dims with dynamic_template over bulk": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + mappings: + dynamic_templates: + - keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true + properties: + "@timestamp": + type: date + + - do: + bulk: index: test refresh: true body: - "@timestamp": "2021-04-28T18:35:24.467Z" - metricset: cat + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat"}' + - is_false: errors - do: search: @@ -102,13 +138,12 @@ fields: - field: _tsid - field: "@timestamp" - - match: {hits.total.value: 1} - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} - match: 
{hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- -"add dimensions with dynamic_template over bulk": +"add dimensions to some dims with dynamic_template over index": - skip: version: " - 7.99.99" reason: introduced in 8.0.0 to be backported to 7.15.0 @@ -120,32 +155,76 @@ settings: index: mode: time_series - dynamic_templates: - keywords: - match_mapping_type: string - mapping: - type: keyword - dimension: true mappings: + dynamic_templates: + - keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true properties: "@timestamp": type: date + metricset: + type: keyword + dimension: true - do: - catch: "/Error building time series id: There aren't any mapped dimensions/" index: index: test refresh: true body: "@timestamp": "2021-04-28T18:35:24.467Z" metricset: cat + other_dim: cat + - match: {result: created} + + - do: + search: + index: test + body: + fields: + - field: _tsid + - field: "@timestamp" + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} + - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} + +--- +"add dimensions to some dims with dynamic_template over bulk": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.15.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + mappings: + dynamic_templates: + - keywords: + match_mapping_type: string + mapping: + type: keyword + dimension: true + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true - do: bulk: index: test + refresh: true body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat"}' + - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat", "other_dim": "cat"}' + - is_false: errors - do: search: @@ -154,7 +233,6 @@ fields: - field: _tsid - field: "@timestamp" - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: 
[{metricset: cat}]} + - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 254de38e525ac..8ff3bee4f7665 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -267,7 +267,11 @@ static Mapping createDynamicUpdate(MappingLookup mappingLookup, root = mappingLookup.getMapping().getRoot().copyAndReset(); } root.addRuntimeFields(dynamicRuntimeFields); - return mappingLookup.getMapping().mappingUpdate(root); + Mapping mapping = mappingLookup.getMapping().mappingUpdate(root); + if (mapping.getTimeSeriesIdGenerator() != mappingLookup.getMapping().getTimeSeriesIdGenerator()) { + throw new IllegalStateException("added a dimension with a dynamic mapping"); + } + return mapping; } private static RootObjectMapper createDynamicUpdate(MappingLookup mappingLookup, From 2bdd50da96438b4f54d79f496125c5a9d065eb92 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 10 Aug 2021 09:26:55 -0400 Subject: [PATCH 20/29] WIP --- .../org/elasticsearch/index/mapper/DocumentParser.java | 7 ++++--- .../index/mapper/TimeSeriesIdFieldMapper.java | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 8ff3bee4f7665..874d677fbfa9d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -268,9 +268,10 @@ static Mapping createDynamicUpdate(MappingLookup mappingLookup, } root.addRuntimeFields(dynamicRuntimeFields); Mapping mapping = 
mappingLookup.getMapping().mappingUpdate(root); - if (mapping.getTimeSeriesIdGenerator() != mappingLookup.getMapping().getTimeSeriesIdGenerator()) { - throw new IllegalStateException("added a dimension with a dynamic mapping"); - } +// if (false == mapping.getTimeSeriesIdGenerator().equals(mappingLookup.getMapping().getTimeSeriesIdGenerator())) { +// // NOCOMMIT we have to implements equals and hashCode for this to work. We haven't +// throw new IllegalStateException("added a dimension with a dynamic mapping"); +// } return mapping; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index b53a543510cee..c2a3dbbd3e401 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -94,7 +94,7 @@ public Query termQuery(Object value, SearchExecutionContext context) { } private TimeSeriesIdFieldMapper() { - super(FIELD_TYPE, Lucene.KEYWORD_ANALYZER); + super(FIELD_TYPE); } @Override From 3c303ae2d06ca7942cca1961f08338f9f2f899b7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 10 Aug 2021 13:49:25 -0400 Subject: [PATCH 21/29] equals/hashCode --- .../index/TimeSeriesIdGenerator.java | 40 ++++++++++ .../index/mapper/DocumentParser.java | 8 +- .../index/mapper/IpFieldMapper.java | 17 ++++ .../index/mapper/KeywordFieldMapper.java | 17 ++++ .../index/mapper/NumberFieldMapper.java | 19 ++++- .../index/TimeSeriesIdGeneratorTests.java | 77 +++++++++++++++++-- 6 files changed, 166 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java index a9fded29960bb..ee7e3ba4690c2 100644 --- a/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java +++ 
b/server/src/main/java/org/elasticsearch/index/TimeSeriesIdGenerator.java @@ -171,6 +171,23 @@ public String toString() { return "extract dimensions using " + root; } + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + TimeSeriesIdGenerator other = (TimeSeriesIdGenerator) obj; + return root.equals(other.root); + } + + @Override + public int hashCode() { + return root.hashCode(); + } + /** * Build the tsid from the {@code _source}. See class docs for more on what it looks like and why. */ @@ -256,6 +273,12 @@ abstract void extract( ) throws IOException; abstract void collectDimensionNames(String name, Consumer consumer); + + @Override + public abstract boolean equals(Object obj); + + @Override + public abstract int hashCode(); } public static final class ObjectComponent extends Component { @@ -297,6 +320,23 @@ void collectDimensionNames(String name, Consumer consumer) { public String toString() { return components.toString(); } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + TimeSeriesIdGenerator.ObjectComponent other = (TimeSeriesIdGenerator.ObjectComponent) obj; + return components.equals(other.components); + } + + @Override + public int hashCode() { + return components.hashCode(); + } } public abstract static class LeafComponent extends Component { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 874d677fbfa9d..efdc1f4f4995c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -268,10 +268,10 @@ static Mapping createDynamicUpdate(MappingLookup mappingLookup, } root.addRuntimeFields(dynamicRuntimeFields); 
Mapping mapping = mappingLookup.getMapping().mappingUpdate(root); -// if (false == mapping.getTimeSeriesIdGenerator().equals(mappingLookup.getMapping().getTimeSeriesIdGenerator())) { -// // NOCOMMIT we have to implements equals and hashCode for this to work. We haven't -// throw new IllegalStateException("added a dimension with a dynamic mapping"); -// } + if (false == mapping.getTimeSeriesIdGenerator().equals(mappingLookup.getMapping().getTimeSeriesIdGenerator())) { + // NOCOMMIT we have to implements equals and hashCode for this to work. We haven't + throw new IllegalStateException("added a dimension with a dynamic mapping"); + } return mapping; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index d64c228043843..6bf69542b342b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -540,5 +540,22 @@ protected String extractString(XContentParser parser) throws IOException { public String toString() { return "ip[" + nullValue + "]"; } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + IpTsidGen other = (IpTsidGen) obj; + return Objects.equals(nullValue, other.nullValue); + } + + @Override + public int hashCode() { + return Objects.hash(nullValue); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index e5c50dceba284..e0183a487682b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -619,5 +619,22 @@ protected String extractString(XContentParser parser) throws IOException { public String 
toString() { return "kwd[" + nullValue + "]"; } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + KeywordTsidGen other = (KeywordTsidGen) obj; + return Objects.equals(nullValue, other.nullValue); + } + + @Override + public int hashCode() { + return Objects.hash(nullValue); + } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index b20906a90e5a7..281cfc5159c26 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1282,7 +1282,7 @@ private static class WholeNumberTsidGen extends TimeSeriesIdGenerator.LongLeaf { private final boolean coerce; WholeNumberTsidGen(NumberType numberType, Number nullValue, boolean coerce) { - this.numberType = numberType; + this.numberType = Objects.requireNonNull(numberType); this.nullValue = nullValue; this.coerce = coerce; } @@ -1300,5 +1300,22 @@ protected long extractLong(XContentParser parser) throws IOException { public String toString() { return numberType + "[" + nullValue + "," + coerce + "]"; } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + WholeNumberTsidGen other = (WholeNumberTsidGen) obj; + return numberType.equals(other.numberType) && Objects.equals(nullValue, other.nullValue) && coerce == other.coerce; + } + + @Override + public int hashCode() { + return Objects.hash(numberType, nullValue, coerce); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java index 74e605f466a59..be9ff5278468f 100644 --- 
a/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesIdGeneratorTests.java @@ -11,6 +11,7 @@ import io.github.nik9000.mapmatcher.MapMatcher; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; @@ -26,6 +27,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.net.InetAddress; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -391,11 +393,7 @@ private Map modifyDimensionValue(Map doc, Map dimens Map result = new LinkedHashMap<>(doc.size()); for (Map.Entry e : doc.entrySet()) { if (e.getKey().equals(keyToModify)) { - Object val = e.getValue(); - Object modified = val instanceof Map - ? modifyDimensionValue((Map) val, (Map) dimensions.get(e.getKey())) - : val + "modified"; - result.put(e.getKey().toString(), modified); + result.put(e.getKey().toString(), modifyDimensionValue(dimensions, e.getKey(), e.getValue())); } else { result.put(e.getKey().toString(), e.getValue()); } @@ -403,6 +401,24 @@ private Map modifyDimensionValue(Map doc, Map dimens return result; } + private Object modifyDimensionValue(Map dimensions, Object key, Object val) { + if (val instanceof Map) { + return modifyDimensionValue((Map) val, (Map) dimensions.get(key)); + } + if (val instanceof Long) { + return ((Long) val).longValue() + 1; + } + if (val instanceof String) { + try { + InetAddress address = InetAddresses.forString((String) val); + return randomValueOtherThan(address, () -> NetworkAddress.format(randomIp(randomBoolean()))); + } catch (IllegalArgumentException e) { + return val + "modified"; + } + } + throw new IllegalArgumentException("don't know how to modify [" + val + "]"); + } + /** * Two 
documents with the same *values* but different dimension keys will generate * different {@code _tsid}s. @@ -464,6 +480,40 @@ public void testEmpty() throws IOException { assertThat(e.getMessage(), equalTo("There aren't any mapped dimensions")); } + public void testEquals() { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); + assertThat(gen, equalTo(gen)); + assertThat(TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)), equalTo(gen)); + assertThat(TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)).hashCode(), equalTo(gen.hashCode())); + + Map otherDimensions = randomValueOtherThan(dimensions, () -> randomDimensionsFromDoc(doc)); + assertThat(TimeSeriesIdGenerator.build(objectComponentForDimensions(otherDimensions)), not(equalTo(gen))); + } + + public void testHashCodeSame() { + Map doc = randomDoc(between(1, 100), between(0, 2)); + Map dimensions = randomDimensionsFromDoc(doc); + TimeSeriesIdGenerator gen = TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)); + assertThat(TimeSeriesIdGenerator.build(objectComponentForDimensions(dimensions)).hashCode(), equalTo(gen.hashCode())); + } + + /** + * Test that two {@link TimeSeriesIdGenerator#hashCode()}s are different. + * Not randomized because you can't be sure that hashcode will always + * be different. + */ + public void testHashCodeDifferent() { + TimeSeriesIdGenerator gen1 = TimeSeriesIdGenerator.build(objectComponentForDimensions(Map.of("a", "cat", "b", 1))); + TimeSeriesIdGenerator gen2 = TimeSeriesIdGenerator.build( + objectComponentForDimensions( + randomFrom(Map.of("a", "cat"), Map.of("b", 1), Map.of("a", 1, "b", 1), Map.of("a", "cat", "b", "dog")) + ) + ); + assertThat(gen1.hashCode(), not(equalTo(gen2))); + } + /** * Removes one of the dimensions from a document. 
*/ @@ -485,16 +535,29 @@ private Map removeDimension(Map doc, Map dimensions) } private LinkedHashMap randomDoc(int count, int subDepth) { - int keyLength = (int) Math.log(count) + 1; + int keyLength = (int) Math.log(count) + 1; // Use shorter keys for smaller tests so they fit better on the screen. LinkedHashMap doc = new LinkedHashMap<>(count); for (int i = 0; i < count; i++) { String key = randomValueOtherThanMany(doc::containsKey, () -> randomAlphaOfLength(keyLength)); - Object sub = subDepth <= 0 || randomBoolean() ? randomAlphaOfLength(5) : randomDoc(count, subDepth - 1); + Object sub = subDepth <= 0 || randomBoolean() ? randomDimensionVaue() : randomDoc(count, subDepth - 1); doc.put(key, sub); } return doc; } + private Object randomDimensionVaue() { + switch (between(0, 2)) { + case 0: + return randomAlphaOfLength(5); + case 1: + return NetworkAddress.format(randomIp(randomBoolean())); + case 2: + return randomLong(); + default: + throw new IllegalStateException("unknown random choice"); + } + } + /** * Extract a random subset of a document to use as dimensions. 
*/ From 77a873d5ab5568ab1652a2ed58cc1e87ed368ecf Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 10 Aug 2021 15:08:05 -0400 Subject: [PATCH 22/29] Fail nicely --- .../test/tsdb/50_add_missing_dimensions.yml | 100 +++++++++--------- .../test/tsdb/60_dimension_types.yml | 1 + .../action/bulk/TransportShardBulkAction.java | 5 +- .../index/mapper/DocumentParser.java | 7 +- .../index/mapper/MapperService.java | 10 ++ 5 files changed, 66 insertions(+), 57 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml index dd9b2c57ceacf..b381a5276ab0d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_add_missing_dimensions.yml @@ -79,24 +79,25 @@ type: date - do: + catch: /There aren't any mapped dimensions/ # TODO allow the dynamic mapping update index: index: test refresh: true body: "@timestamp": "2021-04-28T18:35:24.467Z" metricset: cat - - match: {result: created} - - - do: - search: - index: test - body: - fields: - - field: _tsid - - field: "@timestamp" - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} - - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} +# - match: {result: created} + +# - do: +# search: +# index: test +# body: +# fields: +# - field: _tsid +# - field: "@timestamp" +# - match: {hits.total.value: 1} +# - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} +# - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- "add dimensions to no dims with dynamic_template over bulk": @@ -129,18 +130,19 @@ body: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat"}' - - is_false: errors - - - do: - search: - index: test - body: - fields: - - field: _tsid - - 
field: "@timestamp" - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} - - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} + - match: {items.0.index.error.reason: "Error building time series id: There aren't any mapped dimensions"} +# - is_false: errors + +# - do: +# search: +# index: test +# body: +# fields: +# - field: _tsid +# - field: "@timestamp" +# - match: {hits.total.value: 1} +# - match: {hits.hits.0.fields._tsid: [{metricset: cat}]} +# - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- "add dimensions to some dims with dynamic_template over index": @@ -170,6 +172,7 @@ dimension: true - do: + catch: /added a dimension with a dynamic mapping/ # TODO allow the dynamic mapping update index: index: test refresh: true @@ -177,18 +180,18 @@ "@timestamp": "2021-04-28T18:35:24.467Z" metricset: cat other_dim: cat - - match: {result: created} - - - do: - search: - index: test - body: - fields: - - field: _tsid - - field: "@timestamp" - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} - - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} +# - match: {result: created} + +# - do: +# search: +# index: test +# body: +# fields: +# - field: _tsid +# - field: "@timestamp" +# - match: {hits.total.value: 1} +# - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} +# - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- "add dimensions to some dims with dynamic_template over bulk": @@ -224,15 +227,16 @@ body: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:24.467Z", "metricset": "cat", "other_dim": "cat"}' - - is_false: errors - - - do: - search: - index: test - body: - fields: - - field: _tsid - - field: "@timestamp" - - match: {hits.total.value: 1} - - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} - - match: {hits.hits.0.fields.@timestamp: 
["2021-04-28T18:35:24.467Z"]} + - match: {items.0.index.error.reason: "added a dimension with a dynamic mapping"} +# - is_false: errors + +# - do: +# search: +# index: test +# body: +# fields: +# - field: _tsid +# - field: "@timestamp" +# - match: {hits.total.value: 1} +# - match: {hits.hits.0.fields._tsid: [{metricset: cat, other_dim: cat}]} +# - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml index 97454c553fff0..6711ef1ba6fb7 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_dimension_types.yml @@ -40,6 +40,7 @@ keyword dimension: - '{"@timestamp": "2021-04-28T18:35:44.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.1}' - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.3}' + - is_false: errors - do: search: diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index ad2143f57c969..800e78a9d837b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -290,9 +290,8 @@ static boolean executeBulkItemRequest(BulkPrimaryExecutionContext context, Updat if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { try { - primary.mapperService().merge(MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(result.getRequiredMappingUpdate(), XContentType.JSON, ToXContent.EMPTY_PARAMS), - MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT); + 
primary.mapperService().checkDynamicMappingUpdate(MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(result.getRequiredMappingUpdate(), XContentType.JSON, ToXContent.EMPTY_PARAMS)); } catch (Exception e) { logger.info(() -> new ParameterizedMessage("{} mapping update rejected by primary", primary.shardId()), e); onComplete(exceptionToResult(e, primary, isDelete, version), context, updateResult); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index efdc1f4f4995c..254de38e525ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -267,12 +267,7 @@ static Mapping createDynamicUpdate(MappingLookup mappingLookup, root = mappingLookup.getMapping().getRoot().copyAndReset(); } root.addRuntimeFields(dynamicRuntimeFields); - Mapping mapping = mappingLookup.getMapping().mappingUpdate(root); - if (false == mapping.getTimeSeriesIdGenerator().equals(mappingLookup.getMapping().getTimeSeriesIdGenerator())) { - // NOCOMMIT we have to implements equals and hashCode for this to work. 
We haven't - throw new IllegalStateException("added a dimension with a dynamic mapping"); - } - return mapping; + return mappingLookup.getMapping().mappingUpdate(root); } private static RootObjectMapper createDynamicUpdate(MappingLookup mappingLookup, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 6bd2e3e46effd..968dd10666150 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -272,6 +272,16 @@ public void merge(IndexMetadata indexMetadata, MergeReason reason) { } } + public void checkDynamicMappingUpdate(String type, CompressedXContent mappingSource) { + Mapping incomingMapping = parseMapping(type, mappingSource); + DocumentMapper oldMapper = this.mapper; + Mapping newMapping = mergeMappings(oldMapper, incomingMapping, MergeReason.MAPPING_UPDATE_PREFLIGHT); + newDocumentMapper(newMapping, MergeReason.MAPPING_UPDATE_PREFLIGHT); + if (false == newMapping.getTimeSeriesIdGenerator().equals(oldMapper.mapping().getTimeSeriesIdGenerator())) { + throw new IllegalStateException("added a dimension with a dynamic mapping"); + } + } + public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) { return mergeAndApplyMappings(type, mappingSource, reason); } From 8ec4db64cad41d3bbdd6b330e0ba9528b4ba0f5d Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 10 Aug 2021 15:28:47 -0400 Subject: [PATCH 23/29] Fix settings --- .../org/elasticsearch/index/IndexMode.java | 22 +++++++++++++++++-- .../index/mapper/TimeSeriesIdFieldMapper.java | 1 - 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index ca137a1319ccc..7c2a239c33f1d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -12,10 +12,15 @@ import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.MappedFieldType; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.stream.Stream; + +import static java.util.stream.Collectors.toSet; /** * The "mode" of the index. @@ -53,6 +58,11 @@ void validateWithOtherSettings(Map, Object> settings) { + "]" ); } + for (Setting unsupported : TIME_SERIES_UNSUPPORTED) { + if (false == Objects.equals(unsupported.getDefault(Settings.EMPTY), settings.get(unsupported))) { + throw new IllegalArgumentException("Can't set [" + unsupported.getKey() + "] in time series mode"); + } + } } @Override @@ -73,10 +83,18 @@ public void checkDocWriteRequest(OpType opType, String indexName) { ); } } - }; - static final List> VALIDATE_WITH_SETTINGS = List.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING); + private static final List> TIME_SERIES_UNSUPPORTED = List.of( + IndexSortConfig.INDEX_SORT_FIELD_SETTING, + IndexSortConfig.INDEX_SORT_ORDER_SETTING, + IndexSortConfig.INDEX_SORT_MODE_SETTING, + IndexSortConfig.INDEX_SORT_MISSING_SETTING + ); + + static final List> VALIDATE_WITH_SETTINGS = List.copyOf( + Stream.concat(Stream.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING), TIME_SERIES_UNSUPPORTED.stream()).collect(toSet()) + ); abstract void validateWithOtherSettings(Map, Object> settings); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index c2a3dbbd3e401..b1af7ab8862f2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ 
b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -11,7 +11,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.Query; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; From 3162b2bf9d094b224916a4467f907d20f916508c Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 10 Aug 2021 15:45:24 -0400 Subject: [PATCH 24/29] Better --- .../java/org/elasticsearch/index/mapper/MapperService.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 968dd10666150..caa5cc7f7b462 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.TimeSeriesIdGenerator; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -277,7 +278,9 @@ public void checkDynamicMappingUpdate(String type, CompressedXContent mappingSou DocumentMapper oldMapper = this.mapper; Mapping newMapping = mergeMappings(oldMapper, incomingMapping, MergeReason.MAPPING_UPDATE_PREFLIGHT); newDocumentMapper(newMapping, MergeReason.MAPPING_UPDATE_PREFLIGHT); - if (false == newMapping.getTimeSeriesIdGenerator().equals(oldMapper.mapping().getTimeSeriesIdGenerator())) { + + TimeSeriesIdGenerator 
oldTimeSeriesIdGenerator = oldMapper == null ? null : oldMapper.mapping().getTimeSeriesIdGenerator(); + if (false == Objects.equals(newMapping.getTimeSeriesIdGenerator(), oldTimeSeriesIdGenerator)) { throw new IllegalStateException("added a dimension with a dynamic mapping"); } } From 74339526779e4d55b941c5d623417ccb525817d9 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 11 Aug 2021 09:34:23 -0400 Subject: [PATCH 25/29] Update tests --- .../index/mapper/MapperServiceTests.java | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index 25f718ed45e6f..e9f5ad8bc1d1d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.VersionUtils; @@ -26,6 +27,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -40,6 +42,18 @@ public void testPreflightUpdateDoesNotChangeMapping() throws Throwable { assertThat("field was not created by mapping update", mapperService.fieldType("field0"), notNullValue()); } + public void testCheckDynamicMappingUpdate() throws IOException { + MapperService mapperService = createMapperService( + Settings.builder().put(IndexSettings.MODE.getKey(), 
"time_series").build(), + mapping(b -> b.startObject("@timestamp").field("type", "date").endObject()) + ); + CompressedXContent mappings = new CompressedXContent( + BytesReference.bytes(mapping(b -> b.startObject("dim").field("type", "keyword").field("dimension", true).endObject())) + ); + Exception e = expectThrows(IllegalStateException.class, () -> mapperService.checkDynamicMappingUpdate("_doc", mappings)); + assertThat(e.getMessage(), equalTo("added a dimension with a dynamic mapping")); + } + public void testMappingLookup() throws IOException { MapperService service = createMapperService(mapping(b -> {})); MappingLookup oldLookup = service.mappingLookup(); From 04bf6d60b92deaea2d562f17fab95335bc3c3cf2 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 11 Aug 2021 10:31:03 -0400 Subject: [PATCH 26/29] javadocs --- .../org/elasticsearch/index/mapper/SourceToParse.java | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java index ddb33e8746bd8..b7b2efbb32e26 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceToParse.java @@ -18,6 +18,11 @@ import java.util.function.Function; public class SourceToParse { + /** + * Create a Function that will return a {@link SourceToParse} that parses + * the {@link #timeSeriesId()} from the {@code _source} of the document + * once the {@link DocumentMapper} has been resolved. + */ public static Function parseTimeSeriesIdFromSource( String index, String id, @@ -37,6 +42,10 @@ public static Function parseTimeSeriesIdFromSourc ); } + /** + * Create a {@link SourceToParse} that parses the {@link #timeSeriesId()} from + * the {@code _source}. 
+ */ public static SourceToParse parseTimeSeriesIdFromSource( String index, String id, From fc11cc56ee78f7378f864dbede573774d1fb2606 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 11 Aug 2021 10:53:08 -0400 Subject: [PATCH 27/29] Revert noops --- .../src/test/java/org/elasticsearch/upgrades/RecoveryIT.java | 1 + .../resources/rest-api-spec/test/mixed_cluster/10_basic.yml | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index ae26fc78373a1..41996f2b9e434 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -380,6 +380,7 @@ public void testRetentionLeasesEstablishedWhenRelocatingPrimary() throws Excepti ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); break; + case UPGRADED: ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 216c1d4b06e4c..806de5d4d987e 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -74,7 +74,6 @@ nodes.usage: {} - is_true: nodes - match: { _nodes.failed: 0 } - --- "Get index works": - do: @@ -107,4 +106,3 @@ - match: {index_templates.0.index_template.template.aliases.test_blias.index_routing: "b" } - match: {index_templates.0.index_template.template.aliases.test_blias.search_routing: "b" } - match: {index_templates.0.index_template.template.aliases.test_clias.filter.term.user: "kimchy" } - From 3b5aa95b21bddbf69ecbe2e554a282ba113e218c Mon Sep 17 00:00:00 2001 From: Nik 
Everett Date: Wed, 11 Aug 2021 11:00:40 -0400 Subject: [PATCH 28/29] cleanup --- .../org/elasticsearch/index/IndexSortConfig.java | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index ed0d401ef3877..497d0ceb2f8d0 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -120,23 +120,11 @@ public IndexSortConfig(IndexSettings indexSettings) { this.indexName = indexSettings.getIndex().getName(); this.indexMode = indexSettings.mode(); - List fields = INDEX_SORT_FIELD_SETTING.get(settings); if (indexMode.organizeIntoTimeSeries()) { - if (false == fields.isEmpty()) { - throw new IllegalArgumentException("Can't set [" + INDEX_SORT_FIELD_SETTING.getKey() + "] in time series mode"); - } - if (INDEX_SORT_ORDER_SETTING.exists(settings)) { - throw new IllegalArgumentException("Can't set [" + INDEX_SORT_ORDER_SETTING.getKey() + "] in time series mode"); - } - if (INDEX_SORT_MODE_SETTING.exists(settings)) { - throw new IllegalArgumentException("Can't set [" + INDEX_SORT_MODE_SETTING.getKey() + "] in time series mode"); - } - if (INDEX_SORT_MISSING_SETTING.exists(settings)) { - throw new IllegalArgumentException("Can't set [" + INDEX_SORT_MISSING_SETTING.getKey() + "] in time series mode"); - } this.sortSpecs = new FieldSortSpec[] { new FieldSortSpec("_tsid"), new FieldSortSpec("@timestamp") }; return; } + List fields = INDEX_SORT_FIELD_SETTING.get(settings); this.sortSpecs = fields.stream() .map((name) -> new FieldSortSpec(name)) .toArray(FieldSortSpec[]::new); From f34890ed59e60383de1d925f50b8433543649fe6 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 11 Aug 2021 11:23:43 -0400 Subject: [PATCH 29/29] More docs --- .../main/java/org/elasticsearch/index/mapper/Mapper.java | 7 +++++++ 
.../main/java/org/elasticsearch/index/mapper/Mapping.java | 7 +++++++ .../index/mapper/TimeSeriesIdFieldMapper.java | 5 +++++ 3 files changed, 19 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 0e95bbbf74719..0a3bd596426ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -67,6 +67,13 @@ public final String simpleName() { */ public abstract void validate(MappingLookup mappers); + /** + * Build a stand alone class that can generate the time series id for this + * field. This is a stand alone class because we need one of these for every + * index on every node in the cluster. And every node doesn't have the mapping + * parsed and in memory for every index in the cluster. But it can have this + * "little" thing in memory. + */ protected TimeSeriesIdGenerator.Component selectTimeSeriesIdComponents() { return null; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 2dd40599af03e..f2d7398ae7af6 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -202,6 +202,9 @@ public String toString() { } } + /** + * Generate the time series id if the index is organized by time series. + */ public BytesReference generateTimeSeriesIdIfNeeded(BytesReference source, XContentType xContentType) { if (timeSeriesIdGenerator == null) { return null; @@ -209,6 +212,10 @@ public BytesReference generateTimeSeriesIdIfNeeded(BytesReference source, XConte return timeSeriesIdGenerator.generate(source, xContentType); } + /** + * Get the time series id generator or {@code null} if the index + * isn't organized by time series. 
+ */ public TimeSeriesIdGenerator getTimeSeriesIdGenerator() { return timeSeriesIdGenerator; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java index b1af7ab8862f2..40d29489dced3 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.search.Query; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; @@ -24,6 +25,10 @@ import java.util.List; import java.util.function.Supplier; +/** + * Mapper for {@code _tsid} field generated when the index is + * {@link IndexMode#organizeIntoTimeSeries() organized into time series}. + */ public class TimeSeriesIdFieldMapper extends MetadataFieldMapper { public static final String NAME = "_tsid";