Ignore metadata of deleted indices at start (#48918)
Today in 6.x it is possible to add an index tombstone to the graveyard without
deleting the corresponding index metadata, because the deletion is slightly
deferred. If you shut down the node and upgrade to 7.x when in this state then
the node will fail to apply any cluster states, reporting

    java.lang.IllegalStateException: Cannot delete index [...], it is still part of the cluster state.

This commit addresses this situation by skipping over any index metadata with a
corresponding tombstone, allowing this metadata to be cleaned up by the 7.x
node.
DaveCTurner authored Nov 12, 2019
1 parent 049e122 commit 13170a7
Showing 2 changed files with 54 additions and 2 deletions.
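
In essence, the fix consults the IndexGraveyard from the loaded global metadata and leaves out any on-disk index metadata whose index already has a tombstone, so the 7.x node can clean the leftover folder up once cluster state is recovered. A minimal sketch of that idea follows, assuming the 7.x org.elasticsearch classes are on the classpath; the class name, helper name, and parameters are illustrative only, not the production code in the diff below.

    import org.elasticsearch.cluster.metadata.IndexGraveyard;
    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.cluster.metadata.MetaData;

    public class SkipTombstonedIndices {
        // Hypothetical helper: copy on-disk index metadata into the builder,
        // skipping anything that is already tombstoned in the graveyard.
        static void putNonTombstoned(MetaData.Builder metaDataBuilder, IndexGraveyard indexGraveyard,
                                     Iterable<IndexMetaData> onDiskIndices) {
            for (IndexMetaData indexMetaData : onDiskIndices) {
                if (indexGraveyard.containsIndex(indexMetaData.getIndex())) {
                    // tombstoned in 6.x but never removed from disk: leave it out so the
                    // folder can be cleaned up once cluster state is recovered
                    continue;
                }
                metaDataBuilder.put(indexMetaData, false);
            }
        }
    }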
@@ -21,6 +21,7 @@

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.Manifest;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -115,12 +116,15 @@ private Tuple<Manifest, MetaData> loadFullStateBWC() throws IOException {
MetaData globalMetaData = metaDataAndGeneration.v1();
long globalStateGeneration = metaDataAndGeneration.v2();

final IndexGraveyard indexGraveyard;
if (globalMetaData != null) {
metaDataBuilder = MetaData.builder(globalMetaData);
indexGraveyard = globalMetaData.custom(IndexGraveyard.TYPE);
// TODO https://github.com/elastic/elasticsearch/issues/38556
// assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory starting with Elasticsearch version 8.0";
} else {
metaDataBuilder = MetaData.builder();
indexGraveyard = IndexGraveyard.builder().build();
}

for (String indexFolderName : nodeEnv.availableIndexFolders()) {
@@ -132,8 +136,13 @@ private Tuple<Manifest, MetaData> loadFullStateBWC() throws IOException {
IndexMetaData indexMetaData = indexMetaDataAndGeneration.v1();
long generation = indexMetaDataAndGeneration.v2();
if (indexMetaData != null) {
indices.put(indexMetaData.getIndex(), generation);
metaDataBuilder.put(indexMetaData, false);
if (indexGraveyard.containsIndex(indexMetaData.getIndex())) {
logger.debug("[{}] found metadata for deleted index [{}]", indexFolderName, indexMetaData.getIndex());
// this index folder is cleared up when state is recovered
} else {
indices.put(indexMetaData.getIndex(), generation);
metaDataBuilder.put(indexMetaData, false);
}
} else {
logger.debug("[{}] failed to find metadata for existing index location", indexFolderName);
}
@@ -30,20 +30,24 @@
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.coordination.CoordinationMetaData;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.Manifest;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.indices.IndexClosedException;
@@ -54,6 +58,8 @@
import org.elasticsearch.test.InternalTestCluster.RestartCallback;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
@@ -66,6 +72,7 @@
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.notNullValue;
@@ -508,6 +515,42 @@ public void testArchiveBrokenClusterSettings() throws Exception {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}

public void testHalfDeletedIndexImport() throws Exception {
// It's possible for a 6.x node to add a tombstone for an index but not actually delete the index metadata from disk since that
// deletion is slightly deferred and may race against the node being shut down; if you upgrade to 7.x when in this state then the
// node won't start.

internalCluster().startNode();
createIndex("test", Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.build());
ensureGreen("test");

final MetaData metaData = internalCluster().getInstance(ClusterService.class).state().metaData();
final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths();
writeBrokenMeta(metaStateService -> {
metaStateService.writeGlobalState("test", MetaData.builder(metaData)
// we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the
// term in the coordination metadata
.coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()).term(0L).build())
// add a tombstone but do not delete the index metadata from disk
.putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build());
for (final Path path : paths) {
try (Stream<Path> stateFiles = Files.list(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
for (final Path manifestPath : stateFiles
.filter(p -> p.getFileName().toString().startsWith(Manifest.FORMAT.getPrefix())).collect(Collectors.toList())) {
IOUtils.rm(manifestPath);
}
}
}
});

ensureGreen();

assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty()));
}

private void writeBrokenMeta(CheckedConsumer<MetaStateService, IOException> writer) throws Exception {
Map<String, MetaStateService> metaStateServices = Stream.of(internalCluster().getNodeNames())
.collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(MetaStateService.class, nodeName)));
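
For reference, the tombstone bookkeeping that both the fix and the test above rely on lives in IndexGraveyard. A tiny standalone illustration, assuming only the 7.x server jar on the classpath; the class name, index name, and UUID here are made up:

    import org.elasticsearch.cluster.metadata.IndexGraveyard;
    import org.elasticsearch.index.Index;

    public class GraveyardDemo {
        public static void main(String[] args) {
            // an index is identified by name plus UUID in the cluster metadata
            Index index = new Index("test", "_na_");
            // record a tombstone for it, as a 6.x node does when the index is deleted
            IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(index).build();
            // prints "true": on-disk metadata for this index would now be skipped at startup
            System.out.println(graveyard.containsIndex(index));
        }
    }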
