Add full cluster restart base class (#33577)
This commit adds a base class for full cluster restart tests.
jasontedor committed Sep 11, 2018
1 parent e084a0a commit d725416
Showing 4 changed files with 115 additions and 124 deletions.
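The new base class itself is not among the hunks captured below, but its shape can be inferred from the members this commit removes from FullClusterRestartIT and QueryBuilderBWCIT: the system-property-backed fields and the four preserve-on-completion overrides. The following is a minimal sketch of such a base class, reconstructed purely from those removed lines; the actual AbstractFullClusterRestartTestCase added by the commit may differ in detail.

import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.test.rest.ESRestTestCase;

public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase {

    // Both phases of a full cluster restart test are driven by system properties:
    // the first run sets tests.is_old_cluster=true, the rerun against the
    // upgraded cluster sets it to false.
    private final boolean runningAgainstOldCluster =
            Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));

    private final Version oldClusterVersion =
            Version.fromString(System.getProperty("tests.old_cluster_version"));

    public final boolean isRunningAgainstOldCluster() {
        return runningAgainstOldCluster;
    }

    public final Version getOldClusterVersion() {
        return oldClusterVersion;
    }

    // State written while running against the old cluster must survive into the
    // second phase, so nothing is wiped when a test class completes.
    @Override
    protected boolean preserveIndicesUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveSnapshotsUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveReposUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveTemplatesUponCompletion() {
        return true;
    }
}

With these helpers hoisted into a base class, each test only calls isRunningAgainstOldCluster() and getOldClusterVersion(), which is exactly the substitution the diffs below perform.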
FullClusterRestartIT.java
@@ -64,10 +64,8 @@
* version is started with the same data directories and then this is rerun
* with {@code tests.is_old_cluster} set to {@code false}.
*/
public class FullClusterRestartIT extends ESRestTestCase {
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1);
public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1);
private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0");

private String index;
@@ -77,29 +75,9 @@ public void setIndex() {
index = getTestName().toLowerCase(Locale.ROOT);
}

@Override
protected boolean preserveIndicesUponCompletion() {
return true;
}

@Override
protected boolean preserveSnapshotsUponCompletion() {
return true;
}

@Override
protected boolean preserveReposUponCompletion() {
return true;
}

@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}

public void testSearch() throws Exception {
int count;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -165,7 +143,7 @@ public void testSearch() throws Exception {
}

public void testNewReplicasWork() throws Exception {
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -236,10 +214,10 @@ public void testNewReplicasWork() throws Exception {
*/
public void testAliasWithBadName() throws Exception {
assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before",
oldClusterVersion.before(VERSION_5_1_0_UNRELEASED));
getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED));

int count;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -290,7 +268,7 @@ public void testAliasWithBadName() throws Exception {
Map<String, Object> searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search")));
int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp);
assertEquals(count, totalHits);
if (runningAgainstOldCluster == false) {
if (isRunningAgainstOldCluster() == false) {
// We can remove the alias.
Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName));
assertEquals(200, response.getStatusLine().getStatusCode());
@@ -301,7 +279,7 @@
}

public void testClusterState() throws Exception {
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
mappingsAndSettings.field("template", index);
@@ -340,14 +318,14 @@ public void testClusterState() throws Exception {
assertEquals("0", numberOfReplicas);
Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." + index +
".settings.index.version.created", clusterState)));
assertEquals(oldClusterVersion, version);
assertEquals(getOldClusterVersion(), version);

}

public void testShrink() throws IOException {
String shrunkenIndex = index + "_shrunk";
int numDocs;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -412,12 +390,12 @@ public void testShrinkAfterUpgrade() throws IOException {
public void testShrinkAfterUpgrade() throws IOException {
String shrunkenIndex = index + "_shrunk";
int numDocs;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
// single type was added in 5.5.0 (see #24317)
if (oldClusterVersion.onOrAfter(Version.V_5_5_0) &&
oldClusterVersion.before(Version.V_6_0_0_beta1) &&
if (getOldClusterVersion().onOrAfter(Version.V_5_5_0) &&
getOldClusterVersion().before(Version.V_6_0_0_beta1) &&
randomBoolean()) {
{
// test that mapping.single_type is correctly propagated on the shrunken index,
@@ -478,7 +456,7 @@ public void testShrinkAfterUpgrade() throws IOException {
int totalHits = (int) XContentMapValues.extractValue("hits.total", response);
assertEquals(numDocs, totalHits);

if (runningAgainstOldCluster == false) {
if (isRunningAgainstOldCluster() == false) {
response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search")));
assertNoFailures(response);
totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
@@ -503,7 +481,7 @@
* </ol>
*/
public void testRollover() throws IOException {
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
Request createIndex = new Request("PUT", "/" + index + "-000001");
createIndex.setJsonEntity("{"
+ " \"aliases\": {"
@@ -524,7 +502,7 @@ public void testRollover() throws IOException {
bulkRequest.addParameter("refresh", "");
assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false"));

if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover");
rolloverRequest.setJsonEntity("{"
+ " \"conditions\": {"
@@ -542,7 +520,7 @@
Map<String, Object> count = entityAsMap(client().performRequest(countRequest));
assertNoFailures(count);

int expectedCount = bulkCount + (runningAgainstOldCluster ? 0 : bulkCount);
int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 0 : bulkCount);
assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count));
}

@@ -701,7 +679,7 @@ public void testSingleDoc() throws IOException {
String docLocation = "/" + index + "/doc/1";
String doc = "{\"test\": \"test\"}";

if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
Request createDoc = new Request("PUT", docLocation);
createDoc.setJsonEntity(doc);
client().performRequest(createDoc);
@@ -716,7 +694,7 @@
public void testEmptyShard() throws IOException {
final String index = "test_empty_shard";

if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
@@ -739,7 +717,7 @@ public void testRecovery() throws Exception {
public void testRecovery() throws Exception {
int count;
boolean shouldHaveTranslog;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
count = between(200, 300);
/* We've had bugs in the past where we couldn't restore
* an index without a translog so we randomize whether
@@ -785,7 +763,7 @@ public void testRecovery() throws Exception {
String countResponse = toStr(client().performRequest(countRequest));
assertThat(countResponse, containsString("\"total\":" + count));

if (false == runningAgainstOldCluster) {
if (false == isRunningAgainstOldCluster()) {
boolean restoredFromTranslog = false;
boolean foundPrimary = false;
Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index);
@@ -813,7 +791,7 @@ public void testRecovery() throws Exception {
assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog);

String currentLuceneVersion = Version.CURRENT.luceneVersion.toString();
String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString();
String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString();
if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) {
int numCurrentVersion = 0;
int numBwcVersion = 0;
@@ -853,7 +831,7 @@ public void testRecovery() throws Exception {
*/
public void testSnapshotRestore() throws IOException {
int count;
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
// Create the index
count = between(200, 300);
indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
@@ -873,7 +851,7 @@ public void testSnapshotRestore() throws IOException {
// Stick a routing attribute into the cluster settings so we can see it after the restore
Request addRoutingSettings = new Request("PUT", "/_cluster/settings");
addRoutingSettings.setJsonEntity(
"{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}");
"{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}");
client().performRequest(addRoutingSettings);

// Stick a template into the cluster so we can see it after the restore
@@ -898,7 +876,7 @@
templateBuilder.startObject("alias2"); {
templateBuilder.startObject("filter"); {
templateBuilder.startObject("term"); {
templateBuilder.field("version", runningAgainstOldCluster ? oldClusterVersion : Version.CURRENT);
templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT);
}
templateBuilder.endObject();
}
@@ -911,7 +889,7 @@
createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder));
client().performRequest(createTemplateRequest);

if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
// Create the repo
XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); {
repoConfig.field("type", "fs");
@@ -927,19 +905,19 @@
client().performRequest(createRepoRequest);
}

Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap"));
Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap"));
createSnapshot.addParameter("wait_for_completion", "true");
createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}");
client().performRequest(createSnapshot);

checkSnapshot("old_snap", count, oldClusterVersion);
if (false == runningAgainstOldCluster) {
checkSnapshot("old_snap", count, getOldClusterVersion());
if (false == isRunningAgainstOldCluster()) {
checkSnapshot("new_snap", count, Version.CURRENT);
}
}

public void testHistoryUUIDIsAdded() throws Exception {
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -977,7 +955,7 @@ public void testHistoryUUIDIsAdded() throws Exception {
private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException {
// Check the snapshot metadata, especially the version
Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
if (false == (runningAgainstOldCluster && oldClusterVersion.before(Version.V_5_5_0))) {
if (false == (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_5_5_0))) {
listSnapshotRequest.addParameter("verbose", "true");
}
Map<String, Object> listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest));
@@ -1038,7 +1016,7 @@ private void checkSnapshot(String snapshotName, int count, Version tookOnVersion
Map<String, Object> expectedClusterSettings = new HashMap<>();
expectedClusterSettings.put("transient", emptyMap());
expectedClusterSettings.put("persistent",
singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString()));
singletonMap("cluster.routing.allocation.exclude.test_attr", getOldClusterVersion().toString()));
if (expectedClusterSettings.equals(clusterSettingsResponse) == false) {
NotEqualMessageBuilder builder = new NotEqualMessageBuilder();
builder.compareMaps(clusterSettingsResponse, expectedClusterSettings);
@@ -1048,7 +1026,7 @@ private void checkSnapshot(String snapshotName, int count, Version tookOnVersion
// Check that the template was restored successfully
Map<String, Object> getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template")));
Map<String, Object> expectedTemplate = new HashMap<>();
if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) {
if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) {
expectedTemplate.put("template", "evil_*");
} else {
expectedTemplate.put("index_patterns", singletonList("evil_*"));
QueryBuilderBWCIT.java
@@ -23,7 +23,6 @@
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
@@ -48,7 +47,6 @@
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.rest.ESRestTestCase;

import java.io.ByteArrayInputStream;
import java.io.IOException;
@@ -71,7 +69,7 @@
 * The queries to test are specified in json format, which turns out to work because we rarely break the json format here. If the
 * json format of a query being tested here changes, then feel free to change this.
*/
public class QueryBuilderBWCIT extends ESRestTestCase {
public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {

private static final List<Object[]> CANDIDATES = new ArrayList<>();

@@ -145,33 +143,10 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) {
CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb});
}

private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));

@Override
protected boolean preserveIndicesUponCompletion() {
return true;
}

@Override
protected boolean preserveSnapshotsUponCompletion() {
return true;
}

@Override
protected boolean preserveReposUponCompletion() {
return true;
}

@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}

public void testQueryBuilderBWC() throws Exception {
assumeTrue("5.x not supported", oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1));
assumeTrue("5.x not supported", getOldClusterVersion().onOrAfter(Version.V_6_0_0_alpha1));
String index = "queries";
if (runningAgainstOldCluster) {
if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject();
{
@@ -231,7 +206,7 @@ public void testQueryBuilderBWC() throws Exception {
byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr);
try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) {
try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) {
input.setVersion(oldClusterVersion);
input.setVersion(getOldClusterVersion());
QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
assert in.read() == -1;
assertEquals(expectedQueryBuilder, queryBuilder);