Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[TEST] Add a few additional MDP tests #81274

Merged
merged 3 commits into from
Dec 3, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexFileNames;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
Expand All @@ -18,6 +19,7 @@
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NIOFSDirectory;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
Expand Down Expand Up @@ -63,6 +65,7 @@
import java.util.stream.StreamSupport;

import static org.apache.lucene.index.IndexWriter.WRITE_LOCK_NAME;
import static org.elasticsearch.gateway.PersistedClusterStateService.METADATA_DIRECTORY_NAME;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
Expand Down Expand Up @@ -595,7 +598,7 @@ public void testFailsIfGlobalMetadataIsMissing() throws IOException {
}

final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
try (Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
try (Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME))) {
final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
Expand Down Expand Up @@ -633,8 +636,8 @@ public void testFailsIfGlobalMetadataIsDuplicated() throws IOException {
final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
try (
Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
Directory dupDirectory = newFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))
Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
) {
try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
indexWriter.addIndexes(dupDirectory);
Expand Down Expand Up @@ -693,8 +696,8 @@ public void testFailsIfIndexMetadataIsDuplicated() throws IOException {
final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
try (
Directory directory = newFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
Directory dupDirectory = newFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))
Directory directory = newFSDirectory(brokenPath.resolve(METADATA_DIRECTORY_NAME));
Directory dupDirectory = newFSDirectory(dupPath.resolve(METADATA_DIRECTORY_NAME))
) {
try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
indexWriter.deleteDocuments(new Term("type", "global")); // do not duplicate global metadata
Expand Down Expand Up @@ -1214,11 +1217,7 @@ public void testLimitsFileCount() throws IOException {
writer.writeIncrementalStateAndCommit(1, previousClusterState, clusterState);

for (Path dataPath : nodeEnvironment.nodeDataPaths()) {
try (
DirectoryStream<Path> files = Files.newDirectoryStream(
dataPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)
)
) {
try (DirectoryStream<Path> files = Files.newDirectoryStream(dataPath.resolve(METADATA_DIRECTORY_NAME))) {

int fileCount = 0;
final List<String> fileNames = new ArrayList<>();
Expand Down Expand Up @@ -1251,6 +1250,88 @@ public void testLimitsFileCount() throws IOException {
}
}

/**
 * Verifies that {@link PersistedClusterStateService#overrideVersion} rewrites the
 * node version recorded in the on-disk node metadata across all data paths.
 */
public void testOverrideLuceneVersion() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final String clusterUUID = UUIDs.randomBase64UUID(random());
        final long version = randomLongBetween(1L, Long.MAX_VALUE);

        // Persist a full cluster state first so node metadata exists on disk for
        // nodeMetadata(...) to read back below.
        ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
                    )
                    .incrementVersion()
                    .build()
            );
            // Sanity-check the written state round-trips before touching the version.
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
            assertTrue(clusterState.metadata().clusterUUIDCommitted());
            assertThat(clusterState.metadata().version(), equalTo(version));

        }
        // A freshly written state records the current node version.
        NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
        assertEquals(Version.CURRENT, prevMetadata.nodeVersion());
        PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths());
        // The override must be visible both when reading all paths together...
        NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths());
        assertEquals(Version.V_8_0_0, metadata.nodeVersion());
        // ...and when reading each data path individually, i.e. every path was rewritten.
        for (Path p : persistedClusterStateService.getDataPaths()) {
            NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p);
            assertEquals(Version.V_8_0_0, individualMetadata.nodeVersion());
        }
    }
}

/**
 * Verifies that {@link PersistedClusterStateService#deleteAll} removes the persisted
 * Lucene index (its segments files) from every data path.
 */
public void testDeleteAllPaths() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final String clusterUUID = UUIDs.randomBase64UUID(random());
        final long version = randomLongBetween(1L, Long.MAX_VALUE);

        // Persist a full cluster state so each data path holds a committed Lucene index.
        ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            writer.writeFullStateAndCommit(
                0L,
                ClusterState.builder(clusterState)
                    .metadata(
                        Metadata.builder(clusterState.metadata()).clusterUUID(clusterUUID).clusterUUIDCommitted(true).version(version)
                    )
                    .incrementVersion()
                    .build()
            );
            // Sanity-check the written state round-trips before deleting it.
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            assertThat(clusterState.metadata().clusterUUID(), equalTo(clusterUUID));
            assertTrue(clusterState.metadata().clusterUUIDCommitted());
            assertThat(clusterState.metadata().version(), equalTo(version));
        }

        // Every data path should now contain a segments_N file (a Lucene commit).
        for (Path dataPath : persistedClusterStateService.getDataPaths()) {
            assertTrue(findSegmentInDirectory(dataPath));
        }

        PersistedClusterStateService.deleteAll(persistedClusterStateService.getDataPaths());

        // After deleteAll, no data path should contain a segments file any more.
        for (Path dataPath : persistedClusterStateService.getDataPaths()) {
            assertFalse(findSegmentInDirectory(dataPath));
        }
    }
}

/**
 * Checks whether the metadata directory under the given data path contains a
 * Lucene segments file, i.e. whether a committed index is present there.
 *
 * @param dataPath node data path; its {@code METADATA_DIRECTORY_NAME} subdirectory is inspected
 * @return {@code true} if any file name starts with {@link IndexFileNames#SEGMENTS}
 * @throws IOException if the directory cannot be opened or listed
 */
private boolean findSegmentInDirectory(Path dataPath) throws IOException {
    // Directory is Closeable; the original implementation leaked the handle —
    // use try-with-resources so the directory is always closed.
    try (Directory d = new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME))) {
        for (final String file : d.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS)) {
                return true;
            }
        }
    }
    return false;
}

private void assertExpectedLogs(
long currentTerm,
ClusterState previousState,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
import joptsimple.OptionSet;

import org.apache.lucene.store.BaseDirectoryWrapper;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.rollover.Condition;
Expand Down Expand Up @@ -101,18 +102,23 @@ public void setup() throws IOException {
RecoverySource.EmptyStoreRecoverySource.INSTANCE
);

final Path dataDir = createTempDir();
dataPaths = new Path[] { createTempDir(), createTempDir(), createTempDir() };
final String[] tmpPaths = Arrays.stream(dataPaths).map(s -> s.toAbsolutePath().toString()).toArray(String[]::new);
int randomPath = TestUtil.nextInt(random(), 0, dataPaths.length - 1);
final Path tempDir = dataPaths[randomPath];

environment = TestEnvironment.newEnvironment(
Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), dataDir)
.putList(Environment.PATH_DATA_SETTING.getKey(), dataDir.toAbsolutePath().toString())
.put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
.putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths)
.build()
);

// create same directory structure as prod does
Files.createDirectories(dataDir);
dataPaths = new Path[] { dataDir };
for (Path dataPath : dataPaths) {
Files.createDirectories(dataPath);
}

final Settings settings = Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
Expand All @@ -121,7 +127,7 @@ public void setup() throws IOException {
.put(IndexMetadata.SETTING_INDEX_UUID, shardId.getIndex().getUUID())
.build();

final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(dataDir);
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(tempDir);
shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);

// Adding rollover info to IndexMetadata to check that NamedXContentRegistry is properly configured
Expand All @@ -142,10 +148,10 @@ public void setup() throws IOException {
clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(Metadata.builder().put(indexMetadata, false).build()).build();

try (NodeEnvironment.NodeLock lock = new NodeEnvironment.NodeLock(logger, environment, Files::exists)) {
final Path[] dataPaths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
final Path[] paths = Arrays.stream(lock.getNodePaths()).filter(Objects::nonNull).map(p -> p.path).toArray(Path[]::new);
try (
PersistedClusterStateService.Writer writer = new PersistedClusterStateService(
dataPaths,
paths,
nodeId,
xContentRegistry(),
BigArrays.NON_RECYCLING_INSTANCE,
Expand Down