
Commit

Merge branch 'upstream/master' into move-tsdb-IT-to-yaml
* upstream/master:
  Remove obsolete typed legacy index templates (elastic#80937)
  Remove unnecessary shuffle in unassigned shards allocation. (elastic#65172)
  TSDB: Tests for nanosecond timeprecision timestamp just beyond the limit (elastic#80932)

# Conflicts:
#	rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml
weizijun committed Nov 23, 2021
2 parents 11fad5f + 0ed5eab commit 0f15c86
Showing 11 changed files with 250 additions and 21 deletions.
2 changes: 1 addition & 1 deletion build-tools-internal/version.properties
@@ -2,7 +2,7 @@ elasticsearch = 8.1.0
lucene = 9.0.0-snapshot-cc2a31f2be8

bundled_jdk_vendor = adoptium
-bundled_jdk = 17.0.1+12
+bundled_jdk = 17+35

checkstyle = 8.42

@@ -276,7 +276,7 @@ set bad start_time and end_time:
time_series_dimension: true

- do:
-      catch: /time series index @timestamp value \[1632625781000\] must be larger than 1632625782000/
+      catch: /time series index @timestamp value \[2021-09-26T03:09:41Z\] must be larger than 2021-09-26T03:09:42Z/
index:
index: test_index
        body: {
@@ -285,10 +285,169 @@ set bad start_time and end_time:
        }

- do:
-      catch: /time series index @timestamp value \[1632625793000\] must be smaller than 1632625792000/
+      catch: /time series index @timestamp value \[2021-09-26T03:09:53Z\] must be smaller than 2021-09-26T03:09:52Z/
index:
index: test_index
body: {
"@timestamp": 1632625793000,
"metricset": "pod"
}

---
check start_time and end_time with date_nanos:
- skip:
version: " - 8.0.99"
reason: introduced in 8.1.0
- do:
indices.create:
index: test_index
body:
settings:
index:
mode: time_series
routing_path: [metricset]
time_series:
start_time: 2021-09-26T03:09:42Z
end_time: 2021-09-26T03:09:52Z
mappings:
properties:
"@timestamp":
type: date_nanos
metricset:
type: keyword
time_series_dimension: true

- do:
index:
refresh: true
index: test_index
body: {
"@timestamp": "2021-09-26T03:09:51.123456789Z",
"metricset": "pod"
}

- do:
search:
index: test_index
body:
docvalue_fields: [ '@timestamp' ]
- match: { hits.total.value: 1 }
- match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] }

- do:
catch: /time series index @timestamp value \[2010-09-26T03:09:52.123456789Z\] must be larger than 2021-09-26T03:09:42Z/
index:
index: test_index
body: {
"@timestamp": "2010-09-26T03:09:52.123456789Z",
"metricset": "pod"
}

- do:
catch: /time series index @timestamp value \[2031-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/
index:
index: test_index
body: {
"@timestamp": "2031-09-26T03:09:52.123456789Z",
"metricset": "pod"
}

---
check start_time boundary with date_nanos:
- skip:
version: " - 8.0.99"
reason: introduced in 8.1.0
- do:
indices.create:
index: test_index
body:
settings:
index:
mode: time_series
routing_path: [metricset]
time_series:
start_time: 2021-09-26T03:09:42Z
end_time: 2021-09-26T03:09:52Z
mappings:
properties:
"@timestamp":
type: date_nanos
metricset:
type: keyword
time_series_dimension: true

- do:
index:
refresh: true
index: test_index
body: {
"@timestamp": "2021-09-26T03:09:42.123456789Z",
"metricset": "pod"
}

- do:
search:
index: test_index
body:
docvalue_fields: [ '@timestamp' ]
- match: { hits.total.value: 1 }
- match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:42.123456789Z" ] }

- do:
catch: /time series index @timestamp value \[2021-09-26T03:09:41.123456789Z\] must be larger than 2021-09-26T03:09:42Z/
index:
index: test_index
body: {
"@timestamp": "2021-09-26T03:09:41.123456789Z",
"metricset": "pod"
}

---
check end_time boundary with date_nanos:
- skip:
version: " - 8.0.99"
reason: introduced in 8.1.0
- do:
indices.create:
index: test_index
body:
settings:
index:
mode: time_series
routing_path: [metricset]
time_series:
start_time: 2021-09-26T03:09:42Z
end_time: 2021-09-26T03:09:52Z
mappings:
properties:
"@timestamp":
type: date_nanos
metricset:
type: keyword
time_series_dimension: true

- do:
index:
refresh: true
index: test_index
body: {
"@timestamp": "2021-09-26T03:09:51.123456789Z",
"metricset": "pod"
}

- do:
search:
index: test_index
body:
docvalue_fields: [ '@timestamp' ]
- match: { hits.total.value: 1 }
- match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] }

- do:
catch: /time series index @timestamp value \[2021-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/
index:
index: test_index
body: {
"@timestamp": "2021-09-26T03:09:52.123456789Z",
"metricset": "pod"
}
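
Note: the tests above pin down the boundary semantics that the new error messages describe: start_time is inclusive, end_time is exclusive, and date_nanos values are compared at millisecond precision. A self-contained Java sketch of that check (plain java.time, not the Elasticsearch implementation; the epoch values come from the tests above):

import java.time.Instant;

public class TsRangeCheckSketch {
    // start_time/end_time used by the tests: 2021-09-26T03:09:42Z .. 2021-09-26T03:09:52Z
    static final long START_MILLIS = Instant.parse("2021-09-26T03:09:42Z").toEpochMilli(); // 1632625782000
    static final long END_MILLIS = Instant.parse("2021-09-26T03:09:52Z").toEpochMilli();   // 1632625792000

    // start inclusive, end exclusive; nanosecond precision truncates to millis
    static boolean accepts(Instant timestamp) {
        long millis = timestamp.toEpochMilli();
        return millis >= START_MILLIS && millis < END_MILLIS;
    }

    public static void main(String[] args) {
        System.out.println(accepts(Instant.parse("2021-09-26T03:09:42.123456789Z"))); // true: start boundary test
        System.out.println(accepts(Instant.parse("2021-09-26T03:09:51.123456789Z"))); // true: end boundary test
        System.out.println(accepts(Instant.parse("2021-09-26T03:09:52.123456789Z"))); // false: must be smaller than end_time
    }
}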
@@ -18,7 +18,6 @@
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.service.MasterService;
-import org.elasticsearch.common.Randomness;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
@@ -1068,11 +1067,6 @@ public boolean isIgnoredEmpty() {
return ignored.isEmpty();
}

-        public void shuffle() {
-            nodes.ensureMutable();
-            Randomness.shuffle(unassigned);
-        }

/**
* Drains all unassigned shards and returns it.
* This method will not drain ignored shards.
@@ -117,8 +117,6 @@ public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRout
return clusterState;
}
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
-        // shuffle the unassigned shards, just so we won't have things like poison failed shards
-        routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(
allocationDeciders,
routingNodes,
@@ -197,8 +195,6 @@ public ClusterState applyFailedShards(
ClusterState tmpState = IndexMetadataUpdater.removeStaleIdsWithoutRoutings(clusterState, staleShards, logger);

RoutingNodes routingNodes = getMutableRoutingNodes(tmpState);
-        // shuffle the unassigned shards, just so we won't have things like poison failed shards
-        routingNodes.unassigned().shuffle();
long currentNanoTime = currentNanoTime();
RoutingAllocation allocation = new RoutingAllocation(
allocationDeciders,
@@ -274,8 +270,6 @@ public ClusterState applyFailedShards(
*/
public ClusterState disassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
-        // shuffle the unassigned shards, just so we won't have things like poison failed shards
-        routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(
allocationDeciders,
routingNodes,
@@ -479,8 +473,6 @@ public ClusterState reroute(ClusterState clusterState, String reason) {
ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState);

RoutingNodes routingNodes = getMutableRoutingNodes(fixedClusterState);
-        // shuffle the unassigned shards, just so we won't have things like poison failed shards
-        routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(
allocationDeciders,
routingNodes,
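Note on the shuffle removal (elastic#65172): the allocation entry points above no longer randomize the unassigned list before allocating, the shuffle being unnecessary because the allocator imposes its own deterministic ordering on unassigned shards anyway (that rationale is inferred from the PR title, not quoted from it). A toy Java sketch of why a shuffle ahead of a deterministic sort cannot change the outcome (names are illustrative):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class ShuffleBeforeSortSketch {
    public static void main(String[] args) {
        List<String> unassigned = new ArrayList<>(List.of("shard-2", "shard-0", "shard-1"));
        Collections.shuffle(unassigned);            // roughly what the removed shuffle() did
        unassigned.sort(Comparator.naturalOrder()); // a later deterministic ordering wins regardless
        System.out.println(unassigned);             // [shard-0, shard-1, shard-2] on every run
    }
}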
@@ -492,7 +492,7 @@ public static boolean isTimeSeriesModeEnabled() {
*/
public static final Setting<Instant> TIME_SERIES_END_TIME = Setting.dateSetting(
"index.time_series.end_time",
-        DateUtils.MAX_NANOSECOND_INSTANT,
+        Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_9999),
new Setting.Validator<>() {
@Override
public void validate(Instant value) {}
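Note: the default end_time moves from the nanosecond ceiling (the largest Instant a long of epoch nanoseconds can hold, in the year 2262) to the last millisecond before the year 10000. A plain java.time sketch of the two bounds (the DateUtils constants are assumed to match these values, not quoted from the source):

import java.time.Instant;

public class EndTimeDefaultsSketch {
    public static void main(String[] args) {
        // assumed equivalent of DateUtils.MAX_NANOSECOND_INSTANT: Long.MAX_VALUE epoch nanos
        Instant oldDefault = Instant.ofEpochSecond(0, Long.MAX_VALUE);
        System.out.println(oldDefault); // 2262-04-11T23:47:16.854775807Z

        // assumed equivalent of DateUtils.MAX_MILLIS_BEFORE_9999: last millisecond of year 9999
        Instant newDefault = Instant.parse("9999-12-31T23:59:59.999Z");
        System.out.println(newDefault.toEpochMilli()); // 253402300799999
    }
}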
@@ -14,12 +14,14 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.IndexMode;
+import org.elasticsearch.index.mapper.DateFieldMapper.Resolution;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.io.UncheckedIOException;
+import java.time.Instant;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
@@ -215,19 +217,35 @@ private void validateTimestamp(IndexableField field, DocumentParserContext conte
return;
}

-        long value = field.numericValue().longValue();
+        long originValue = field.numericValue().longValue();
+        long value = originValue;

+        Resolution resolution;
+        if (context.mappingLookup().getMapper(DEFAULT_PATH).typeName().equals(DateFieldMapper.DATE_NANOS_CONTENT_TYPE)) {
+            resolution = Resolution.NANOSECONDS;
+            value /= NSEC_PER_MSEC;
+        } else {
+            resolution = Resolution.MILLISECONDS;
+        }

long startTime = context.indexSettings().getTimeSeriesStartTime();
if (value < startTime) {
throw new IllegalArgumentException("time series index @timestamp value [" + value + "] must be larger than " + startTime);
throw new IllegalArgumentException(
"time series index @timestamp value ["
+ resolution.toInstant(originValue)
+ "] must be larger than "
+ Instant.ofEpochMilli(startTime)
);
}

long endTime = context.indexSettings().getTimeSeriesEndTime();
if (value >= endTime) {
throw new IllegalArgumentException("time series index @timestamp value [" + value + "] must be smaller than " + endTime);
throw new IllegalArgumentException(
"time series index @timestamp value ["
+ resolution.toInstant(originValue)
+ "] must be smaller than "
+ Instant.ofEpochMilli(endTime)
);
}
}

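Note: the mapper change does two things: for a date_nanos @timestamp it divides the raw value by NSEC_PER_MSEC before the range comparison, and it reports errors as ISO instants rather than raw epoch numbers, which is what the updated YAML expectations assert. A reduced, self-contained Java sketch of that conversion and message formatting (it mirrors the diff but is not the mapper code):

import java.time.Instant;

public class TimestampMessageSketch {
    static final long NSEC_PER_MSEC = 1_000_000L;

    static String check(long rawValue, boolean nanosResolution, long startMillis, long endMillis) {
        long millis = nanosResolution ? rawValue / NSEC_PER_MSEC : rawValue;
        Instant shown = nanosResolution ? Instant.ofEpochSecond(0, rawValue) : Instant.ofEpochMilli(rawValue);
        if (millis < startMillis) {
            return "time series index @timestamp value [" + shown + "] must be larger than " + Instant.ofEpochMilli(startMillis);
        }
        if (millis >= endMillis) {
            return "time series index @timestamp value [" + shown + "] must be smaller than " + Instant.ofEpochMilli(endMillis);
        }
        return "ok";
    }

    public static void main(String[] args) {
        // a date_nanos value just past end_time, as in the boundary test above
        long raw = Instant.parse("2021-09-26T03:09:52.123456789Z").getEpochSecond() * 1_000_000_000L + 123_456_789L;
        System.out.println(check(raw, true, 1632625782000L, 1632625792000L));
        // time series index @timestamp value [2021-09-26T03:09:52.123456789Z] must be smaller than 2021-09-26T03:09:52Z
    }
}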
@@ -11,6 +11,7 @@
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetadata;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
@@ -41,7 +42,9 @@
import java.io.UncheckedIOException;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.function.Supplier;
+import java.util.function.UnaryOperator;

import static org.elasticsearch.index.engine.EngineConfig.INDEX_CODEC_SETTING;
import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME;
@@ -191,4 +194,13 @@ public String getFeatureName() {
public String getFeatureDescription() {
return "Enables Logstash Central Management pipeline storage";
}

+    @Override
+    public UnaryOperator<Map<String, IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() {
+        return templates -> {
+            // .logstash is a system index now. deleting the legacy template
+            templates.remove("logstash-index-template");
+            return templates;
+        };
+    }
}
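
Note: this Logstash hunk, and the Monitoring, Security, and Watcher hunks below, all apply the same cleanup pattern: getIndexTemplateMetadataUpgrader() returns a pure UnaryOperator over the cluster's legacy-template map that deletes entries now superseded by system indices. A stripped-down Java sketch of the pattern (String stands in for IndexTemplateMetadata, and the template names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

public class TemplateUpgraderSketch {
    public static void main(String[] args) {
        UnaryOperator<Map<String, String>> upgrader = templates -> {
            templates.remove("legacy-index-template"); // drop the template its system index replaced
            return templates;
        };
        Map<String, String> templates = new HashMap<>(Map.of("legacy-index-template", "{...}", "still-used", "{...}"));
        System.out.println(upgrader.apply(templates).keySet()); // [still-used]
    }
}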
@@ -10,6 +10,7 @@
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.IndexTemplateMetadata;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -72,6 +73,7 @@
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
+import java.util.function.UnaryOperator;

import static org.elasticsearch.common.settings.Setting.boolSetting;

@@ -236,4 +238,17 @@ public void reload(Settings settings) throws Exception {
exporters.setExportersSetting(settingsForChangedExporter);
}
}

+    @Override
+    public UnaryOperator<Map<String, IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() {
+        return map -> {
+            // this template was not migrated to typeless due to the possibility of the old /_monitoring/bulk API being used
+            // see {@link org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils#OLD_TEMPLATE_VERSION}
+            // however the bulk API is not typed (the type field is for the docs, a field inside the docs) so it's safe to remove this
+            // old template and rely on the updated, typeless, .monitoring-alerts-7 template
+            map.remove(".monitoring-alerts");
+            return map;
+        };
+
+    }
}
@@ -1531,6 +1531,8 @@ public UnaryOperator<Map<String, IndexTemplateMetadata>> getIndexTemplateMetadat
return templates -> {
// .security index is not managed by using templates anymore
templates.remove("security_audit_log");
+            // .security is a system index now. deleting another legacy template that's not used anymore
+            templates.remove("security-index-template");
return templates;
};
}
@@ -716,6 +716,11 @@ public void onIndexModule(IndexModule module) {
public UnaryOperator<Map<String, IndexTemplateMetadata>> getIndexTemplateMetadataUpgrader() {
return map -> {
map.keySet().removeIf(name -> name.startsWith("watch_history_"));
+            // watcher migrated to using system indices so these legacy templates are not needed anymore
+            map.remove(".watches");
+            map.remove(".triggered_watches");
+            // post 7.x we moved to typeless watch-history-10
+            map.remove(".watch-history-9");
return map;
};
}
(diff for the remaining changed file was not loaded)
