diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index d17f0bf94e3b7..661edbbeab1cd 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -21,8 +21,11 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -33,7 +36,8 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import java.io.IOException; -import java.util.Collections; + +import static java.util.Collections.emptySet; /** * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API. @@ -55,7 +59,7 @@ public final class IndicesClient { */ public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - Collections.emptySet(), headers); + emptySet(), headers); } /** @@ -66,7 +70,7 @@ public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header. */ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... 
headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -77,7 +81,7 @@ public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -99,7 +103,7 @@ public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); + } + + /** + * Updates aliases using the Index Aliases API + *

+ * See + * Index Aliases API on elastic.co + */ + public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases, + IndicesAliasesResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously updates aliases using the Index Aliases API + *

+ * See + * Index Aliases API on elastic.co + */ + public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequestRequest, ActionListener listener, + Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequestRequest, Request::updateAliases, + IndicesAliasesResponse::fromXContent, listener, emptySet(), headers); } /** @@ -122,7 +151,7 @@ public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener< */ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - Collections.emptySet(), headers); + emptySet(), headers); } /** @@ -133,7 +162,7 @@ public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... heade */ public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); } /** @@ -144,7 +173,7 @@ public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent, - listener, Collections.emptySet(), headers); + listener, emptySet(), headers); + } + + /** + * Checks if one or more aliases exist using the Aliases Exist API + *

+ * See + * Indices Aliases API on elastic.co + */ + public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, + emptySet(), headers); + } + + /** + * Asynchronously checks if one or more aliases exist using the Aliases Exist API + *

+ * See + * Indices Aliases API on elastic.co + */ + public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse, + listener, emptySet(), headers); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 229e45498aa95..04c10f28b2e0b 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -29,7 +29,9 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -132,7 +134,7 @@ static Request delete(DeleteRequest deleteRequest) { } static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { - String endpoint = endpoint(deleteIndexRequest.indices(), Strings.EMPTY_ARRAY, ""); + String endpoint = endpoint(deleteIndexRequest.indices()); Params parameters = Params.builder(); parameters.withTimeout(deleteIndexRequest.timeout()); @@ -143,7 +145,7 @@ static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) { } static Request openIndex(OpenIndexRequest openIndexRequest) { - String endpoint = endpoint(openIndexRequest.indices(), Strings.EMPTY_ARRAY, "_open"); + String endpoint = endpoint(openIndexRequest.indices(), "_open"); Params parameters 
= Params.builder(); @@ -156,7 +158,7 @@ static Request openIndex(OpenIndexRequest openIndexRequest) { } static Request closeIndex(CloseIndexRequest closeIndexRequest) { - String endpoint = endpoint(closeIndexRequest.indices(), Strings.EMPTY_ARRAY, "_close"); + String endpoint = endpoint(closeIndexRequest.indices(), "_close"); Params parameters = Params.builder(); @@ -168,7 +170,7 @@ static Request closeIndex(CloseIndexRequest closeIndexRequest) { } static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException { - String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, ""); + String endpoint = endpoint(createIndexRequest.indices()); Params parameters = Params.builder(); parameters.withTimeout(createIndexRequest.timeout()); @@ -178,6 +180,15 @@ static Request createIndex(CreateIndexRequest createIndexRequest) throws IOExcep HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE); return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); } + + static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withTimeout(indicesAliasesRequest.timeout()); + parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout()); + + HttpEntity entity = createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpPost.METHOD_NAME, "/_aliases", parameters.getParams(), entity); + } static Request putMapping(PutMappingRequest putMappingRequest) throws IOException { // The concreteIndex is an internal concept, not applicable to requests made over the REST API. 
@@ -348,7 +359,7 @@ static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { parameters.withRealtime(multiGetRequest.realtime()); parameters.withRefresh(multiGetRequest.refresh()); HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpGet.METHOD_NAME, "/_mget", parameters.getParams(), entity); + return new Request(HttpPost.METHOD_NAME, "/_mget", parameters.getParams(), entity); } static Request index(IndexRequest indexRequest) { @@ -437,17 +448,17 @@ static Request search(SearchRequest searchRequest) throws IOException { if (searchRequest.source() != null) { entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE); } - return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity); + return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity); } static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request("GET", "/_search/scroll", Collections.emptyMap(), entity); + return new Request(HttpPost.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); } static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request("DELETE", "/_search/scroll", Collections.emptyMap(), entity); + return new Request(HttpDelete.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); } static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { @@ -459,7 +470,18 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type())); - 
return new Request("GET", "/_msearch", params.getParams(), entity); + return new Request(HttpPost.METHOD_NAME, "/_msearch", params.getParams(), entity); + } + + static Request existsAlias(GetAliasesRequest getAliasesRequest) { + Params params = Params.builder(); + params.withIndicesOptions(getAliasesRequest.indicesOptions()); + params.withLocal(getAliasesRequest.local()); + if (getAliasesRequest.indices().length == 0 && getAliasesRequest.aliases().length == 0) { + throw new IllegalArgumentException("existsAlias requires at least an alias or an index"); + } + String endpoint = endpoint(getAliasesRequest.indices(), "_alias", getAliasesRequest.aliases()); + return new Request("HEAD", endpoint, params.getParams(), null); } private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { @@ -467,8 +489,28 @@ private static HttpEntity createEntity(ToXContent toXContent, XContentType xCont return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } + static String endpoint(String index, String type, String id) { + return buildEndpoint(index, type, id); + } + + static String endpoint(String index, String type, String id, String endpoint) { + return buildEndpoint(index, type, id, endpoint); + } + + static String endpoint(String[] indices) { + return buildEndpoint(String.join(",", indices)); + } + + static String endpoint(String[] indices, String endpoint) { + return buildEndpoint(String.join(",", indices), endpoint); + } + static String endpoint(String[] indices, String[] types, String endpoint) { - return endpoint(String.join(",", indices), String.join(",", types), endpoint); + return buildEndpoint(String.join(",", indices), String.join(",", types), endpoint); + } + + static String endpoint(String[] indices, String endpoint, String[] suffixes) { + return buildEndpoint(String.join(",", indices), endpoint, String.join(",", suffixes)); } static String endpoint(String[] indices, String 
endpoint, String type) { @@ -476,9 +518,9 @@ static String endpoint(String[] indices, String endpoint, String type) { } /** - * Utility method to build request's endpoint. + * Utility method to build request's endpoint given its parts as strings */ - static String endpoint(String... parts) { + static String buildEndpoint(String... parts) { StringJoiner joiner = new StringJoiner("/", "/", ""); for (String part : parts) { if (Strings.hasLength(part)) { @@ -646,6 +688,11 @@ Params withIndicesOptions(IndicesOptions indicesOptions) { return this; } + Params withLocal(boolean local) { + putParam("local", Boolean.toString(local)); + return this; + } + Map getParams() { return Collections.unmodifiableMap(params); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 2f81479f93a64..6fb9bd4c0fd35 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -20,9 +20,14 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import 
org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -32,6 +37,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; @@ -43,7 +49,9 @@ import java.util.Map; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -165,6 +173,97 @@ public void testDeleteIndex() throws IOException { } } + @SuppressWarnings("unchecked") + public void testUpdateAliases() throws IOException { + String index = "index"; + String alias = "alias"; + + createIndex(index); + assertThat(aliasExists(index, alias), equalTo(false)); + assertThat(aliasExists(alias), equalTo(false)); + + IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); + AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(index).aliases(alias); + addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}"); + aliasesAddRequest.addAliasAction(addAction); + IndicesAliasesResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync); + assertTrue(aliasesAddResponse.isAcknowledged()); + assertThat(aliasExists(alias), equalTo(true)); + assertThat(aliasExists(index, alias), equalTo(true)); + Map getAlias = getAlias(index, alias); + assertThat(getAlias.get("index_routing"), equalTo("routing")); + assertThat(getAlias.get("search_routing"), equalTo("search_routing")); + Map 
filter = (Map) getAlias.get("filter"); + Map term = (Map) filter.get("term"); + assertEquals(2016, term.get("year")); + + String alias2 = "alias2"; + IndicesAliasesRequest aliasesAddRemoveRequest = new IndicesAliasesRequest(); + addAction = new AliasActions(AliasActions.Type.ADD).indices(index).alias(alias2); + aliasesAddRemoveRequest.addAliasAction(addAction); + AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index(index).alias(alias); + aliasesAddRemoveRequest.addAliasAction(removeAction); + IndicesAliasesResponse aliasesAddRemoveResponse = execute(aliasesAddRemoveRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync); + assertTrue(aliasesAddRemoveResponse.isAcknowledged()); + assertThat(aliasExists(alias), equalTo(false)); + assertThat(aliasExists(alias2), equalTo(true)); + assertThat(aliasExists(index, alias), equalTo(false)); + assertThat(aliasExists(index, alias2), equalTo(true)); + + IndicesAliasesRequest aliasesRemoveIndexRequest = new IndicesAliasesRequest(); + AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index(index); + aliasesRemoveIndexRequest.addAliasAction(removeIndexAction); + IndicesAliasesResponse aliasesRemoveIndexResponse = execute(aliasesRemoveIndexRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync); + assertTrue(aliasesRemoveIndexResponse.isAcknowledged()); + assertThat(aliasExists(alias), equalTo(false)); + assertThat(aliasExists(alias2), equalTo(false)); + assertThat(aliasExists(index, alias), equalTo(false)); + assertThat(aliasExists(index, alias2), equalTo(false)); + assertThat(indexExists(index), equalTo(false)); + } + + public void testAliasesNonExistentIndex() throws IOException { + String index = "index"; + String alias = "alias"; + String nonExistentIndex = "non_existent_index"; + + IndicesAliasesRequest nonExistentIndexRequest = new IndicesAliasesRequest(); + 
nonExistentIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest, + highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); + assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); + + createIndex(index); + IndicesAliasesRequest mixedRequest = new IndicesAliasesRequest(); + mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).indices(index).aliases(alias)); + mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).indices(nonExistentIndex).alias(alias)); + exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); + assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); + assertThat(exception.getMetadata("es.index"), not(hasItem(index))); + assertThat(aliasExists(index, alias), equalTo(false)); + assertThat(aliasExists(alias), equalTo(false)); + + IndicesAliasesRequest removeIndexRequest = new IndicesAliasesRequest(); + removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); + removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE_INDEX).indices(nonExistentIndex)); + exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases, + 
highLevelClient().indices()::updateAliasesAsync)); + assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); + assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); + assertThat(exception.getMetadata("es.index"), not(hasItem(index))); + assertThat(aliasExists(index, alias), equalTo(false)); + assertThat(aliasExists(alias), equalTo(false)); + } + public void testOpenExistingIndex() throws IOException { String index = "index"; createIndex(index); @@ -245,6 +344,32 @@ private static void closeIndex(String index) throws IOException { assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } + private static boolean aliasExists(String alias) throws IOException { + Response response = client().performRequest("HEAD", "/_alias/" + alias); + return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); + } + + private static boolean aliasExists(String index, String alias) throws IOException { + Response response = client().performRequest("HEAD", "/" + index + "/_alias/" + alias); + return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); + } + + public void testExistsAlias() throws IOException { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); + assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + + createIndex("index"); + client().performRequest("PUT", "/index/_alias/alias"); + assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + + GetAliasesRequest getAliasesRequest2 = new GetAliasesRequest(); + getAliasesRequest2.aliases("alias"); + getAliasesRequest2.indices("index"); + assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, 
highLevelClient().indices()::existsAliasAsync)); + getAliasesRequest2.indices("does_not_exist"); + assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + } + @SuppressWarnings("unchecked") private Map getIndexMetadata(String index) throws IOException { Response response = client().performRequest("GET", index); @@ -258,4 +383,26 @@ private Map getIndexMetadata(String index) throws IOException { return indexMetaData; } + + @SuppressWarnings({ "unchecked", "rawtypes" }) + private static Map getAlias(final String index, final String alias) throws IOException { + String endpoint = "/_alias"; + if (false == Strings.isEmpty(index)) { + endpoint = index + endpoint; + } + if (false == Strings.isEmpty(alias)) { + endpoint = endpoint + "/" + alias; + } + Map performGet = performGet(endpoint); + return (Map) ((Map) ((Map) performGet.get(index)).get("aliases")).get(alias); + } + + private static Map performGet(final String endpoint) throws IOException { + Response response = client().performRequest("GET", endpoint); + XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue()); + Map responseEntity = XContentHelper.convertToMap(entityContentType.xContent(), response.getEntity().getContent(), + false); + assertNotNull(responseEntity); + return responseEntity; + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 0ddaf1de1ca52..e76833f84a0d7 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -20,12 +20,20 @@ package org.elasticsearch.client; import org.apache.http.HttpEntity; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import 
org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; @@ -93,6 +101,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE; import static org.elasticsearch.client.Request.enforceSameContentType; +import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.CoreMatchers.equalTo; @@ -135,7 +144,7 @@ public void testPing() { assertEquals("/", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertNull(request.getEntity()); - assertEquals("HEAD", request.getMethod()); + assertEquals(HttpHead.METHOD_NAME, request.getMethod()); } public void testInfo() { @@ -143,11 +152,11 @@ public void testInfo() { assertEquals("/", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertNull(request.getEntity()); - assertEquals("GET", request.getMethod()); + assertEquals(HttpGet.METHOD_NAME, 
request.getMethod()); } public void testGet() { - getAndExistsTest(Request::get, "GET"); + getAndExistsTest(Request::get, HttpGet.METHOD_NAME); } public void testMultiGet() throws IOException { @@ -197,7 +206,7 @@ public void testMultiGet() throws IOException { } Request request = Request.multiGet(multiGetRequest); - assertEquals("GET", request.getMethod()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_mget", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(multiGetRequest, request.getEntity()); @@ -237,7 +246,7 @@ public void testDelete() { } public void testExists() { - getAndExistsTest(Request::exists, "HEAD"); + getAndExistsTest(Request::exists, HttpHead.METHOD_NAME); } private static void getAndExistsTest(Function requestConverter, String method) { @@ -314,10 +323,25 @@ public void testCreateIndex() throws IOException { Request request = Request.createIndex(createIndexRequest); assertEquals("/" + indexName, request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertEquals("PUT", request.getMethod()); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); assertToXContentBody(createIndexRequest, request.getEntity()); } + public void testUpdateAliases() throws IOException { + IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); + AliasActions aliasAction = randomAliasAction(); + indicesAliasesRequest.addAliasAction(aliasAction); + + Map expectedParams = new HashMap<>(); + setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomMasterTimeout(indicesAliasesRequest, expectedParams); + + Request request = Request.updateAliases(indicesAliasesRequest); + assertEquals("/_aliases", request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(indicesAliasesRequest, request.getEntity()); + } + public void testPutMapping() throws 
IOException { PutMappingRequest putMappingRequest = new PutMappingRequest(); @@ -347,7 +371,7 @@ public void testPutMapping() throws IOException { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertEquals("PUT", request.getMethod()); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); assertToXContentBody(putMappingRequest, request.getEntity()); } @@ -364,7 +388,7 @@ public void testDeleteIndex() { Request request = Request.deleteIndex(deleteIndexRequest); assertEquals("/" + String.join(",", indices), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertEquals("DELETE", request.getMethod()); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertNull(request.getEntity()); } @@ -383,7 +407,7 @@ public void testOpenIndex() { StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open"); assertThat(endpoint.toString(), equalTo(request.getEndpoint())); assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(request.getMethod(), equalTo("POST")); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); assertThat(request.getEntity(), nullValue()); } @@ -400,7 +424,7 @@ public void testCloseIndex() { StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close"); assertThat(endpoint.toString(), equalTo(request.getEndpoint())); assertThat(expectedParams, equalTo(request.getParameters())); - assertThat(request.getMethod(), equalTo("POST")); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); assertThat(request.getEntity(), nullValue()); } @@ -414,9 +438,9 @@ public void testIndex() throws IOException { Map expectedParams = new HashMap<>(); - String method = "POST"; + String method = HttpPost.METHOD_NAME; if (id != null) { - method = "PUT"; + method = HttpPut.METHOD_NAME; if (randomBoolean()) { 
indexRequest.opType(DocWriteRequest.OpType.CREATE); } @@ -551,7 +575,7 @@ public void testUpdate() throws IOException { Request request = Request.update(updateRequest); assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertEquals("POST", request.getMethod()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); HttpEntity entity = request.getEntity(); assertTrue(entity instanceof ByteArrayEntity); @@ -665,7 +689,7 @@ public void testBulk() throws IOException { Request request = Request.bulk(bulkRequest); assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); - assertEquals("POST", request.getMethod()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); byte[] content = new byte[(int) request.getEntity().getContentLength()]; try (InputStream inputStream = request.getEntity().getContent()) { @@ -876,6 +900,7 @@ public void testSearch() throws Exception { endpoint.add(type); } endpoint.add("_search"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(searchSourceBuilder, request.getEntity()); @@ -914,6 +939,7 @@ public void testMultiSearch() throws IOException { Request request = Request.multiSearch(multiSearchRequest); assertEquals("/_msearch", request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(expectedParams, request.getParameters()); List requests = new ArrayList<>(); @@ -937,7 +963,7 @@ public void testSearchScroll() throws IOException { searchScrollRequest.scroll(randomPositiveTimeValue()); } Request request = Request.searchScroll(searchScrollRequest); - assertEquals("GET", request.getMethod()); + 
assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(searchScrollRequest, request.getEntity()); @@ -951,13 +977,51 @@ public void testClearScroll() throws IOException { clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10)); } Request request = Request.clearScroll(clearScrollRequest); - assertEquals("DELETE", request.getMethod()); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(clearScrollRequest, request.getEntity()); assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testExistsAlias() { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + String[] indices = randomIndicesNames(0, 5); + getAliasesRequest.indices(indices); + //the HEAD endpoint requires at least an alias or an index + String[] aliases = randomIndicesNames(indices.length == 0 ? 
1 : 0, 5); + getAliasesRequest.aliases(aliases); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + boolean local = randomBoolean(); + getAliasesRequest.local(local); + } + expectedParams.put("local", Boolean.toString(getAliasesRequest.local())); + + setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams); + + Request request = Request.existsAlias(getAliasesRequest); + StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + expectedEndpoint.add(index); + } + expectedEndpoint.add("_alias"); + String alias = String.join(",", aliases); + if (Strings.hasLength(alias)) { + expectedEndpoint.add(alias); + } + assertEquals(expectedEndpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertNull(request.getEntity()); + } + + public void testExistsAliasNoAliasNoIndex() { + GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); + assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); + } + private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); @@ -992,14 +1056,25 @@ public void testParamsNoDuplicates() { assertEquals("1", requestParams.values().iterator().next()); } + public void testBuildEndpoint() { + assertEquals("/", Request.buildEndpoint()); + assertEquals("/", Request.buildEndpoint(Strings.EMPTY_ARRAY)); + assertEquals("/", Request.buildEndpoint("")); + assertEquals("/a/b", Request.buildEndpoint("a", "b")); + assertEquals("/a/b/_create", 
Request.buildEndpoint("a", "b", "_create")); + assertEquals("/a/b/c/_create", Request.buildEndpoint("a", "b", "c", "_create")); + assertEquals("/a/_create", Request.buildEndpoint("a", null, null, "_create")); + } + public void testEndpoint() { - assertEquals("/", Request.endpoint()); - assertEquals("/", Request.endpoint(Strings.EMPTY_ARRAY)); - assertEquals("/", Request.endpoint("")); - assertEquals("/a/b", Request.endpoint("a", "b")); - assertEquals("/a/b/_create", Request.endpoint("a", "b", "_create")); - assertEquals("/a/b/c/_create", Request.endpoint("a", "b", "c", "_create")); - assertEquals("/a/_create", Request.endpoint("a", null, null, "_create")); + assertEquals("/index/type/id", Request.endpoint("index", "type", "id")); + assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint")); + assertEquals("/index1,index2", Request.endpoint(new String[]{"index1", "index2"})); + assertEquals("/index1,index2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, "_endpoint")); + assertEquals("/index1,index2/type1,type2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, + new String[]{"type1", "type2"}, "_endpoint")); + assertEquals("/index1,index2/_endpoint/suffix1,suffix2", Request.endpoint(new String[]{"index1", "index2"}, + "_endpoint", new String[]{"suffix1", "suffix2"})); } public void testCreateContentType() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 1d5961c506f83..914e9c874ae28 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -179,7 +179,7 @@ public void testSearchScroll() throws IOException { assertEquals(5, searchResponse.getTotalShards()); assertEquals(5, 
searchResponse.getSuccessfulShards()); assertEquals(100, searchResponse.getTook().getMillis()); - verify(restClient).performRequest(eq("GET"), eq("/_search/scroll"), eq(Collections.emptyMap()), + verify(restClient).performRequest(eq("POST"), eq("/_search/scroll"), eq(Collections.emptyMap()), isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers))); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 23029c7c6b007..1bcfb0bbc24a0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -22,8 +22,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -301,16 +305,10 @@ public void onFailure(Exception e) { } }); // end::put-mapping-execute-async - - assertBusy(() -> { - // TODO Use Indices Exist API instead once it exists - Response response = client.getLowLevelClient().performRequest("HEAD", "twitter"); - 
assertTrue(RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode()); - }); } } - public void testOpenIndex() throws IOException { + public void testOpenIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { @@ -381,7 +379,7 @@ public void onFailure(Exception e) { } } - public void testCloseIndex() throws IOException { + public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); { @@ -429,19 +427,123 @@ public void onFailure(Exception e) { } }); // end::close-index-execute-async + } + } + + public void testExistsAlias() throws Exception { + RestHighLevelClient client = highLevelClient(); { - // tag::close-index-notfound - try { - CloseIndexRequest request = new CloseIndexRequest("does_not_exist"); - client.indices().close(request); - } catch (ElasticsearchException exception) { - if (exception.status() == RestStatus.BAD_REQUEST) { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index") + .alias(new Alias("alias"))); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::exists-alias-request + GetAliasesRequest request = new GetAliasesRequest(); + GetAliasesRequest requestWithAlias = new GetAliasesRequest("alias1"); + GetAliasesRequest requestWithAliases = new GetAliasesRequest(new String[]{"alias1", "alias2"}); + // end::exists-alias-request + + // tag::exists-alias-request-alias + request.aliases("alias"); // <1> + // end::exists-alias-request-alias + // tag::exists-alias-request-indices + request.indices("index"); // <1> + // end::exists-alias-request-indices + + // tag::exists-alias-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::exists-alias-request-indicesOptions + + // tag::exists-alias-request-local + request.local(true); // <1> + // end::exists-alias-request-local + + // tag::exists-alias-execute + boolean exists = client.indices().existsAlias(request); + // 
end::exists-alias-execute + assertTrue(exists); + + // tag::exists-alias-execute-async + client.indices().existsAliasAsync(request, new ActionListener() { + @Override + public void onResponse(Boolean exists) { // <1> } - } - // end::close-index-notfound + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::exists-alias-execute-async } } + + public void testIndicesAliases() throws IOException { + RestHighLevelClient client = highLevelClient(); + + { + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1")); + assertTrue(createIndexResponse.isAcknowledged()); + createIndexResponse = client.indices().create(new CreateIndexRequest("index2")); + assertTrue(createIndexResponse.isAcknowledged()); + createIndexResponse = client.indices().create(new CreateIndexRequest("index3")); + assertTrue(createIndexResponse.isAcknowledged()); + createIndexResponse = client.indices().create(new CreateIndexRequest("index4")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + { + // tag::update-aliases-request + IndicesAliasesRequest request = new IndicesAliasesRequest(); // <1> + AliasActions aliasAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1"); // <2> + request.addAliasAction(aliasAction); // <3> + // end::update-aliases-request + + // tag::update-aliases-request2 + AliasActions addIndexAction = new AliasActions(AliasActions.Type.ADD).index("index1").alias("alias1") + .filter("{\"term\":{\"year\":2016}}"); // <1> + AliasActions addIndicesAction = new AliasActions(AliasActions.Type.ADD).indices("index1", "index2").alias("alias2") + .routing("1"); // <2> + AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index("index3").alias("alias3"); // <3> + AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index("index4"); // <4> + // end::update-aliases-request2 + + // tag::update-aliases-request-timeout + 
request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::update-aliases-request-timeout + // tag::update-aliases-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::update-aliases-request-masterTimeout + + // tag::update-aliases-execute + IndicesAliasesResponse indicesAliasesResponse = client.indices().updateAliases(request); + // end::update-aliases-execute + + // tag::update-aliases-response + boolean acknowledged = indicesAliasesResponse.isAcknowledged(); // <1> + // end::update-aliases-response + assertTrue(acknowledged); + + // tag::update-aliases-execute-async + client.indices().updateAliasesAsync(request, new ActionListener() { + @Override + public void onResponse(IndicesAliasesResponse indicesAliasesResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }); + // end::update-aliases-execute-async + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestTests.java new file mode 100644 index 0000000000000..80e3c34818c3b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequestTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.alias; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; +import static org.hamcrest.CoreMatchers.equalTo; + +public class IndicesAliasesRequestTests extends ESTestCase { + + public void testToAndFromXContent() throws IOException { + IndicesAliasesRequest indicesAliasesRequest = createTestInstance(); + XContentType xContentType = randomFrom(XContentType.values()); + + BytesReference shuffled = toShuffledXContent(indicesAliasesRequest, xContentType, ToXContent.EMPTY_PARAMS, true, "filter"); + + IndicesAliasesRequest parsedIndicesAliasesRequest; + try (XContentParser parser = createParser(xContentType.xContent(), shuffled)) { + parsedIndicesAliasesRequest = IndicesAliasesRequest.fromXContent(parser); + assertNull(parser.nextToken()); + } + + for (int i = 0; i < parsedIndicesAliasesRequest.getAliasActions().size(); i++) { + AliasActions expectedAction = indicesAliasesRequest.getAliasActions().get(i); + AliasActions actualAction = parsedIndicesAliasesRequest.getAliasActions().get(i); + assertThat(actualAction, equalTo(expectedAction)); + } + } + + private 
IndicesAliasesRequest createTestInstance() { + int numItems = randomIntBetween(0, 32); + IndicesAliasesRequest request = new IndicesAliasesRequest(); + if (randomBoolean()) { + request.timeout(randomTimeValue()); + } + + if (randomBoolean()) { + request.masterNodeTimeout(randomTimeValue()); + } + for (int i = 0; i < numItems; i++) { + request.addAliasAction(randomAliasAction()); + } + return request; + } +} diff --git a/docs/java-api/query-dsl/geo-distance-query.asciidoc b/docs/java-api/query-dsl/geo-distance-query.asciidoc index 7927dff440be6..cc8c89ca61eea 100644 --- a/docs/java-api/query-dsl/geo-distance-query.asciidoc +++ b/docs/java-api/query-dsl/geo-distance-query.asciidoc @@ -5,7 +5,7 @@ See {ref}/query-dsl-geo-distance-query.html[Geo Distance Query] ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{query-dsl-test}[geo_bounding_box] +include-tagged::{query-dsl-test}[geo_distance] -------------------------------------------------- <1> field <2> center point diff --git a/docs/java-rest/high-level/apis/exists_alias.asciidoc b/docs/java-rest/high-level/apis/exists_alias.asciidoc new file mode 100644 index 0000000000000..cbbb2c3315c2b --- /dev/null +++ b/docs/java-rest/high-level/apis/exists_alias.asciidoc @@ -0,0 +1,69 @@ +[[java-rest-high-exists-alias]] +=== Exists Alias API + +[[java-rest-high-exists-alias-request]] +==== Exists Alias Request + +The Exists Alias API uses `GetAliasesRequest` as its request object. +One or more aliases can be optionally provided either at construction +time or later on through the relevant setter method. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request] +-------------------------------------------------- + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-alias] +-------------------------------------------------- +<1> One or more aliases to look for + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-indices] +-------------------------------------------------- +<1> The index or indices that the alias is associated with + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-request-local] +-------------------------------------------------- +<1> The `local` flag (defaults to `false`) controls whether the aliases need +to be looked up in the local cluster state or in the cluster state held by +the elected master node. 
+ +[[java-rest-high-exists-alias-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-execute] +-------------------------------------------------- + +[[java-rest-high-exists-alias-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[exists-alias-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-exists-alias-response]] +==== Exists Alias Response + +The Exists Alias API returns a `boolean` that indicates whether the provided +alias (or aliases) was found or not. diff --git a/docs/java-rest/high-level/apis/index.asciidoc b/docs/java-rest/high-level/apis/index.asciidoc index f7367b6e8c26d..5f1f15b644799 100644 --- a/docs/java-rest/high-level/apis/index.asciidoc +++ b/docs/java-rest/high-level/apis/index.asciidoc @@ -8,6 +8,10 @@ include::close_index.asciidoc[] include::putmapping.asciidoc[] +include::update_aliases.asciidoc[] + +include::exists_alias.asciidoc[] + include::_index.asciidoc[] include::get.asciidoc[] diff --git a/docs/java-rest/high-level/apis/update_aliases.asciidoc b/docs/java-rest/high-level/apis/update_aliases.asciidoc new file mode 100644 index 0000000000000..14f3fd2eb8366 --- /dev/null +++ b/docs/java-rest/high-level/apis/update_aliases.asciidoc @@ -0,0 +1,79 @@ +[[java-rest-high-update-aliases]] +=== Update Aliases API + +[[java-rest-high-update-aliases-request]] +==== Indices Aliases Request + +The Update Aliases API allows aliasing an index with a name, with all APIs +automatically converting the alias name to the actual 
index name. + +An `IndicesAliasesRequest` must have at least one `AliasActions`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request] +-------------------------------------------------- +<1> Creates an `IndicesAliasesRequest` +<2> Creates an `AliasActions` that aliases index `index1` with `alias1` +<3> Adds the alias action to the request + +The following action types are supported: `add` - alias an index, `remove` - +removes the alias associated with the index, and `remove_index` - deletes the +index. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request2] +-------------------------------------------------- +<1> Creates an alias `alias1` with an optional filter on field `year` +<2> Creates an alias `alias2` associated with two indices and with an optional routing +<3> Removes the associated alias `alias3` +<4> `remove_index` is just like <> + +==== Optional arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for all the nodes to acknowledge the operation as a `TimeValue` +<2> Timeout to wait for all the nodes to acknowledge the operation as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master 
node as a `String` + +[[java-rest-high-update-aliases-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-execute] +-------------------------------------------------- + +[[java-rest-high-update-aliases-async]] +==== Asynchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-execute-async] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-update-aliases-response]] +==== Indices Aliases Response + +The returned `IndicesAliasesResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[update-aliases-response] +-------------------------------------------------- +<1> Indicates whether all of the nodes have acknowledged the request \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index aede4789f4dec..8eb2590e99d9d 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -9,6 +9,8 @@ Indices APIs:: * <> * <> * <> +* <> +* <> Single document APIs:: * <> diff --git a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc index be18689bfddc4..58de8c5a3e142 100644 --- a/docs/reference/aggregations/bucket/composite-aggregation.asciidoc +++ 
b/docs/reference/aggregations/bucket/composite-aggregation.asciidoc @@ -394,6 +394,10 @@ GET /_search ... "aggregations": { "my_buckets": { + "after_key": { <1> + "date": 1494288000000, + "product": "mad max" + }, "buckets": [ { "key": { @@ -403,7 +407,7 @@ GET /_search "doc_count": 1 }, { - "key": { <1> + "key": { "date": 1494288000000, "product": "mad max" }, @@ -418,9 +422,14 @@ GET /_search <1> The last composite bucket returned by the query. +NOTE: The `after_key` is equal to the last bucket returned in the response before +any filtering that could be done by <>. +If all buckets are filtered/removed by a pipeline aggregation, the `after_key` will contain +the last bucket before filtering. + The `after` parameter can be used to retrieve the composite buckets that are **after** the last composite buckets returned in a previous round. -For the example below the last bucket is `"key": [1494288000000, "mad max"]` so the next +For the example below the last bucket can be found in `after_key` and the next round of result can be retrieved with: [source,js] @@ -485,6 +494,10 @@ GET /_search ... "aggregations": { "my_buckets": { + "after_key": { + "date": 1494201600000, + "product": "rocky" + }, "buckets": [ { "key": { diff --git a/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc b/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc index 8797e6041d5f3..f45629b14e746 100644 --- a/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/reverse-nested-aggregation.asciidoc @@ -93,7 +93,7 @@ GET /issues/_search // TEST[s/_search/_search\?filter_path=aggregations/] As you can see above, the `reverse_nested` aggregation is put in to a `nested` aggregation as this is the only place -in the dsl where the `reversed_nested` aggregation can be used. Its sole purpose is to join back to a parent doc higher +in the dsl where the `reverse_nested` aggregation can be used. 
Its sole purpose is to join back to a parent doc higher up in the nested structure. <1> A `reverse_nested` aggregation that joins back to the root / main document level, because no `path` has been defined. diff --git a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc index e768cb0b295b8..1c739c40996b2 100644 --- a/docs/reference/aggregations/bucket/terms-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/terms-aggregation.asciidoc @@ -114,6 +114,11 @@ This means that if the number of unique terms is greater than `size`, the return (it could be that the term counts are slightly off and it could even be that a term that should have been in the top size buckets was not returned). +NOTE: If you want to retrieve **all** terms or all combinations of terms in a nested `terms` aggregation + you should use the <> aggregation which + allows to paginate over all possible terms rather than setting a size greater than the cardinality of the field in the + `terms` aggregation. The `terms` aggregation is meant to return the `top` terms and does not allow pagination. + [[search-aggregations-bucket-terms-aggregation-approximate-counts]] ==== Document counts are approximate diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 0a6dbd0eb8359..b3156dbc1f414 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -858,7 +858,7 @@ GET /bank/_search Note that if `size` is not specified, it defaults to 10. 
-This example does a `match_all` and returns documents 11 through 20: +This example does a `match_all` and returns documents 10 through 19: [source,js] -------------------------------------------------- diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index c418a333a8d2e..4fa1b5f035731 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -31,6 +31,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x. * <> * <> * <> +* <> include::migrate_7_0/aggregations.asciidoc[] @@ -41,3 +42,4 @@ include::migrate_7_0/mappings.asciidoc[] include::migrate_7_0/search.asciidoc[] include::migrate_7_0/plugins.asciidoc[] include::migrate_7_0/api.asciidoc[] +include::migrate_7_0/java.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/java.asciidoc b/docs/reference/migration/migrate_7_0/java.asciidoc new file mode 100644 index 0000000000000..a686ba0bfbfbc --- /dev/null +++ b/docs/reference/migration/migrate_7_0/java.asciidoc @@ -0,0 +1,8 @@ +[[breaking_70_java_changes]] +=== Java API changes + +==== `isShardsAcked` deprecated in `6.2` has been removed + +`isShardsAcked` has been replaced by `isShardsAcknowledged` in +`CreateIndexResponse`, `RolloverResponse` and +`CreateIndexClusterStateUpdateResponse`. \ No newline at end of file diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 4552366de9800..81d9c4c369075 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -142,8 +142,9 @@ You can specify the locale to use with `boundary_scanner_locale`. boundary_scanner_locale:: Controls which locale is used to search for sentence and word boundaries. -encoder:: Indicates if the highlighted text should be HTML encoded: -`default` (no encoding) or `html` (escapes HTML highlighting tags). 
+encoder:: Indicates if the snippet should be HTML encoded: +`default` (no encoding) or `html` (HTML-escape the snippet text and then +insert the highlighting tags) fields:: Specifies the fields to retrieve highlights for. You can use wildcards to specify fields. For example, you could specify `comment_*` to diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java index 7925856656e15..5167f7d1434de 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/MethodWriter.java @@ -81,7 +81,7 @@ public final class MethodWriter extends GeneratorAdapter { private final BitSet statements; private final CompilerSettings settings; - private final Deque> stringConcatArgs = + private final Deque> stringConcatArgs = (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE == null) ? null : new ArrayDeque<>(); public MethodWriter(int access, Method method, ClassVisitor cw, BitSet statements, CompilerSettings settings) { @@ -200,7 +200,7 @@ private void writeCast(Class from, Class to) { * Proxy the box method to use valueOf instead to ensure that the modern boxing methods are used. */ @Override - public void box(org.objectweb.asm.Type type) { + public void box(Type type) { valueOf(type); } @@ -252,10 +252,10 @@ public int writeNewStrings() { } } - public void writeAppendStrings(final Definition.Type type) { + public void writeAppendStrings(Class clazz) { if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { // Java 9+: record type information - stringConcatArgs.peek().add(type.type); + stringConcatArgs.peek().add(getType(clazz)); // prevent too many concat args. 
// If there are too many, do the actual concat: if (stringConcatArgs.peek().size() >= MAX_INDY_STRING_CONCAT_ARGS) { @@ -266,24 +266,24 @@ public void writeAppendStrings(final Definition.Type type) { } } else { // Java 8: push a StringBuilder append - if (type.clazz == boolean.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_BOOLEAN); - else if (type.clazz == char.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_CHAR); - else if (type.clazz == byte.class || - type.clazz == short.class || - type.clazz == int.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_INT); - else if (type.clazz == long.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_LONG); - else if (type.clazz == float.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_FLOAT); - else if (type.clazz == double.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_DOUBLE); - else if (type.clazz == String.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_STRING); - else invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_OBJECT); + if (clazz == boolean.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_BOOLEAN); + else if (clazz == char.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_CHAR); + else if (clazz == byte.class || + clazz == short.class || + clazz == int.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_INT); + else if (clazz == long.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_LONG); + else if (clazz == float.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_FLOAT); + else if (clazz == double.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_DOUBLE); + else if (clazz == String.class) invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_STRING); + else invokeVirtual(STRINGBUILDER_TYPE, STRINGBUILDER_APPEND_OBJECT); } } public void writeToStrings() { if (INDY_STRING_CONCAT_BOOTSTRAP_HANDLE != null) { // Java 9+: use type information and 
push invokeDynamic - final String desc = org.objectweb.asm.Type.getMethodDescriptor(STRING_TYPE, - stringConcatArgs.pop().stream().toArray(org.objectweb.asm.Type[]::new)); + final String desc = Type.getMethodDescriptor(STRING_TYPE, + stringConcatArgs.pop().stream().toArray(Type[]::new)); invokeDynamic("concat", desc, INDY_STRING_CONCAT_BOOTSTRAP_HANDLE); } else { // Java 8: call toString() on StringBuilder @@ -292,9 +292,9 @@ public void writeToStrings() { } /** Writes a dynamic binary instruction: returnType, lhs, and rhs can be different */ - public void writeDynamicBinaryInstruction(Location location, Definition.Type returnType, Definition.Type lhs, Definition.Type rhs, + public void writeDynamicBinaryInstruction(Location location, Class returnType, Class lhs, Class rhs, Operation operation, int flags) { - org.objectweb.asm.Type methodType = org.objectweb.asm.Type.getMethodType(returnType.type, lhs.type, rhs.type); + Type methodType = Type.getMethodType(getType(returnType), getType(lhs), getType(rhs)); switch (operation) { case MUL: @@ -310,7 +310,7 @@ public void writeDynamicBinaryInstruction(Location location, Definition.Type ret // if either side is primitive, then the + operator should always throw NPE on null, // so we don't need a special NPE guard. // otherwise, we need to allow nulls for possible string concatenation. 
- boolean hasPrimitiveArg = lhs.clazz.isPrimitive() || rhs.clazz.isPrimitive(); + boolean hasPrimitiveArg = lhs.isPrimitive() || rhs.isPrimitive(); if (!hasPrimitiveArg) { flags |= DefBootstrap.OPERATOR_ALLOWS_NULL; } @@ -343,8 +343,8 @@ public void writeDynamicBinaryInstruction(Location location, Definition.Type ret } /** Writes a static binary instruction */ - public void writeBinaryInstruction(Location location, Definition.Type type, Operation operation) { - if ((type.clazz == float.class || type.clazz == double.class) && + public void writeBinaryInstruction(Location location, Class clazz, Operation operation) { + if ( (clazz == float.class || clazz == double.class) && (operation == Operation.LSH || operation == Operation.USH || operation == Operation.RSH || operation == Operation.BWAND || operation == Operation.XOR || operation == Operation.BWOR)) { @@ -352,17 +352,17 @@ public void writeBinaryInstruction(Location location, Definition.Type type, Oper } switch (operation) { - case MUL: math(GeneratorAdapter.MUL, type.type); break; - case DIV: math(GeneratorAdapter.DIV, type.type); break; - case REM: math(GeneratorAdapter.REM, type.type); break; - case ADD: math(GeneratorAdapter.ADD, type.type); break; - case SUB: math(GeneratorAdapter.SUB, type.type); break; - case LSH: math(GeneratorAdapter.SHL, type.type); break; - case USH: math(GeneratorAdapter.USHR, type.type); break; - case RSH: math(GeneratorAdapter.SHR, type.type); break; - case BWAND: math(GeneratorAdapter.AND, type.type); break; - case XOR: math(GeneratorAdapter.XOR, type.type); break; - case BWOR: math(GeneratorAdapter.OR, type.type); break; + case MUL: math(GeneratorAdapter.MUL, getType(clazz)); break; + case DIV: math(GeneratorAdapter.DIV, getType(clazz)); break; + case REM: math(GeneratorAdapter.REM, getType(clazz)); break; + case ADD: math(GeneratorAdapter.ADD, getType(clazz)); break; + case SUB: math(GeneratorAdapter.SUB, getType(clazz)); break; + case LSH: math(GeneratorAdapter.SHL, 
getType(clazz)); break; + case USH: math(GeneratorAdapter.USHR, getType(clazz)); break; + case RSH: math(GeneratorAdapter.SHR, getType(clazz)); break; + case BWAND: math(GeneratorAdapter.AND, getType(clazz)); break; + case XOR: math(GeneratorAdapter.XOR, getType(clazz)); break; + case BWOR: math(GeneratorAdapter.OR, getType(clazz)); break; default: throw location.createError(new IllegalStateException("Illegal tree structure.")); } @@ -416,7 +416,7 @@ public void visitEnd() { * @param flavor type of call * @param params flavor-specific parameters */ - public void invokeDefCall(String name, org.objectweb.asm.Type methodType, int flavor, Object... params) { + public void invokeDefCall(String name, Type methodType, int flavor, Object... params) { Object[] args = new Object[params.length + 2]; args[0] = settings.getInitialCallSiteDepth(); args[1] = flavor; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java index 45ca4601e963d..3715c5802bb75 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EAssignment.java @@ -25,6 +25,7 @@ import org.elasticsearch.painless.Definition; import org.elasticsearch.painless.Definition.Cast; import org.elasticsearch.painless.Definition.Type; +import org.elasticsearch.painless.Definition.def; import org.elasticsearch.painless.Globals; import org.elasticsearch.painless.Locals; import org.elasticsearch.painless.Location; @@ -274,12 +275,12 @@ void write(MethodWriter writer, Globals globals) { writer.writeDup(lhs.accessElementCount(), catElementStackSize); // dup the top element and insert it // before concat helper on stack lhs.load(writer, globals); // read the current lhs's value - writer.writeAppendStrings(lhs.actual); // append the lhs's value using the StringBuilder + 
writer.writeAppendStrings(Definition.TypeToClass(lhs.actual)); // append the lhs's value using the StringBuilder rhs.write(writer, globals); // write the bytecode for the rhs - if (!(rhs instanceof EBinary) || !((EBinary)rhs).cat) { // check to see if the rhs has already done a concatenation - writer.writeAppendStrings(rhs.actual); // append the rhs's value since it's hasn't already + if (!(rhs instanceof EBinary) || !((EBinary)rhs).cat) { // check to see if the rhs has already done a concatenation + writer.writeAppendStrings(Definition.TypeToClass(rhs.actual)); // append the rhs's value since it's hasn't already } writer.writeToStrings(); // put the value for string concat onto the stack @@ -313,9 +314,9 @@ void write(MethodWriter writer, Globals globals) { // write the operation instruction for compound assignment if (promote.dynamic) { writer.writeDynamicBinaryInstruction( - location, promote, DefType, DefType, operation, DefBootstrap.OPERATOR_COMPOUND_ASSIGNMENT); + location, Definition.TypeToClass(promote), def.class, def.class, operation, DefBootstrap.OPERATOR_COMPOUND_ASSIGNMENT); } else { - writer.writeBinaryInstruction(location, promote, operation); + writer.writeBinaryInstruction(location, Definition.TypeToClass(promote), operation); } writer.writeCast(back); // if necessary cast the promotion type value back to the lhs's type diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java index 55c2145acd8cd..b0ad92d3fc422 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EBinary.java @@ -649,13 +649,13 @@ void write(MethodWriter writer, Globals globals) { left.write(writer, globals); if (!(left instanceof EBinary) || !((EBinary)left).cat) { - writer.writeAppendStrings(left.actual); + 
writer.writeAppendStrings(Definition.TypeToClass(left.actual)); } right.write(writer, globals); if (!(right instanceof EBinary) || !((EBinary)right).cat) { - writer.writeAppendStrings(right.actual); + writer.writeAppendStrings(Definition.TypeToClass(right.actual)); } if (!cat) { @@ -684,9 +684,10 @@ void write(MethodWriter writer, Globals globals) { if (originallyExplicit) { flags |= DefBootstrap.OPERATOR_EXPLICIT_CAST; } - writer.writeDynamicBinaryInstruction(location, actual, left.actual, right.actual, operation, flags); + writer.writeDynamicBinaryInstruction(location, Definition.TypeToClass(actual), + Definition.TypeToClass(left.actual), Definition.TypeToClass(right.actual), operation, flags); } else { - writer.writeBinaryInstruction(location, actual, operation); + writer.writeBinaryInstruction(location, Definition.TypeToClass(actual), operation); } } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java index 3ebd674a81eab..6572313308b32 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RethrottleTests.java @@ -78,7 +78,7 @@ public void testDeleteByQueryWithWorkers() throws Exception { private void testCase(AbstractBulkByScrollRequestBuilder request, String actionName) throws Exception { logger.info("Starting test for [{}] with [{}] slices", actionName, request.request().getSlices()); /* Add ten documents per slice so most slices will have many documents to process, having to go to multiple batches. - * we can't rely on all of them doing so, but + * We can't rely on the slices being evenly sized but 10 means we have some pretty big slices. 
*/ createIndex("test"); @@ -170,6 +170,8 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a // Now the response should come back quickly because we've rethrottled the request BulkByScrollResponse response = responseListener.get(); + + // It'd be bad if the entire require completed in a single batch. The test wouldn't be testing anything. assertThat("Entire request completed in a single batch. This may invalidate the test as throttling is done between batches.", response.getBatches(), greaterThanOrEqualTo(numSlices)); } @@ -189,8 +191,9 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque assertThat(rethrottleResponse.getTasks(), hasSize(1)); response.set(rethrottleResponse); } catch (ElasticsearchException e) { - // if it's the error we're expecting, rethrow as AssertionError so awaitBusy doesn't exit early if (e.getCause() instanceof IllegalArgumentException) { + // We want to retry in this case so we throw an assertion error + logger.info("caught unprepared task, retrying until prepared"); throw new AssertionError("Rethrottle request for task [" + taskToRethrottle.getId() + "] failed", e); } else { throw e; @@ -206,14 +209,32 @@ private TaskGroup findTaskToRethrottle(String actionName, int sliceCount) { do { ListTasksResponse tasks = client().admin().cluster().prepareListTasks().setActions(actionName).setDetailed(true).get(); tasks.rethrowFailures("Finding tasks to rethrottle"); - assertThat(tasks.getTaskGroups(), hasSize(lessThan(2))); + assertThat("tasks are left over from the last execution of this test", + tasks.getTaskGroups(), hasSize(lessThan(2))); if (0 == tasks.getTaskGroups().size()) { + // The parent task hasn't started yet continue; } TaskGroup taskGroup = tasks.getTaskGroups().get(0); - if (sliceCount != 1 && taskGroup.getChildTasks().size() == 0) { - // If there are child tasks wait for at least one to start - continue; + if (sliceCount != 1) { + BulkByScrollTask.Status status = 
(BulkByScrollTask.Status) taskGroup.getTaskInfo().getStatus(); + /* + * If there are child tasks wait for all of them to start. It + * is possible that we'll end up with some very small slices + * (maybe even empty!) that complete super fast so we have to + * count them too. + */ + long finishedChildStatuses = status.getSliceStatuses().stream() + .filter(n -> n != null) + .count(); + logger.info("Expected [{}] total children, [{}] are running and [{}] are finished\n{}", + sliceCount, taskGroup.getChildTasks().size(), finishedChildStatuses, status.getSliceStatuses()); + if (sliceCount == finishedChildStatuses) { + fail("all slices finished:\n" + status); + } + if (sliceCount != taskGroup.getChildTasks().size() + finishedChildStatuses) { + continue; + } } return taskGroup; } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 222cbca07ae55..5d4bcd7c10a84 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -34,13 +34,13 @@ compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-tr dependencies { // network stack - compile "io.netty:netty-buffer:4.1.13.Final" - compile "io.netty:netty-codec:4.1.13.Final" - compile "io.netty:netty-codec-http:4.1.13.Final" - compile "io.netty:netty-common:4.1.13.Final" - compile "io.netty:netty-handler:4.1.13.Final" - compile "io.netty:netty-resolver:4.1.13.Final" - compile "io.netty:netty-transport:4.1.13.Final" + compile "io.netty:netty-buffer:4.1.16.Final" + compile "io.netty:netty-codec:4.1.16.Final" + compile "io.netty:netty-codec-http:4.1.16.Final" + compile "io.netty:netty-common:4.1.16.Final" + compile "io.netty:netty-handler:4.1.16.Final" + compile "io.netty:netty-resolver:4.1.16.Final" + compile "io.netty:netty-transport:4.1.16.Final" } dependencyLicenses { @@ -149,18 +149,18 @@ thirdPartyAudit.excludes = [ 
'io.netty.util.internal.PlatformDependent0$3', 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', - 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueue', - 'io.netty.util.internal.shaded.org.jctools.queues.ConcurrentSequencedCircularArrayQueue', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', - 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueConsumerField', - 'io.netty.util.internal.shaded.org.jctools.queues.MpmcArrayQueueProducerField', - 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerField', - 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueHeadLimitField', - 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueTailField', - 'io.netty.util.internal.shaded.org.jctools.util.JvmInfo', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', 'org.conscrypt.Conscrypt$Engines', 'org.conscrypt.HandshakeListener' ] diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.13.Final.jar.sha1 deleted file mode 100644 index 31f015e158af4..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-buffer-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e3f583ea8a2618a7563b1ee2aa696c23edcc3d8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..c546222971985 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +63b5fa95c74785e16f2c30ce268bc222e35c8cb5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.13.Final.jar.sha1 deleted file mode 100644 index 6e7f1bdc14ced..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -370eeb6e9d92495a2a3be096ab6102755af76730 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..1e6c241ea0b17 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +d84a1f21768b7309c2954521cf5a1f46c2309eb1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.13.Final.jar.sha1 deleted file mode 100644 index 80d18918e1d3f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0ee87368766e6b900cf6be8ac9cdce27156e9411 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..71c33af1c5fc2 --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-codec-http-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +d64312378b438dfdad84267c599a053327c6f02a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.13.Final.jar.sha1 deleted file mode 100644 index 044ec3ef4ed2c..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f640e8cd8866527150784f8986152d3bba45b712 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..3edf5fcea59b3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +177a6b30cca92f6f5f9873c9befd681377a4c328 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.13.Final.jar.sha1 deleted file mode 100644 index 862f16a32a3ec..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85847aa81a98d29948731befb4784d141046fa0e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..cba27387268d1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +fec0e63e7dd7f4eeef7ea8dc47a1ff32dfc7ebc2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.13.Final.jar.sha1 deleted file mode 100644 index 7857ddac89cb9..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.13.Final.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -d33ce420bd22c8a53246296ceb6e1ff08d31f8e1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..3571d2ecfdc48 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +f6eb553b53fb3a90a8ac1170697093fed82eae28 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.13.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.13.Final.jar.sha1 deleted file mode 100644 index dc86ce66f4357..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.13.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5008406221a849a350ad2a8885f14ac330e038f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.16.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.16.Final.jar.sha1 new file mode 100644 index 0000000000000..e502d4c77084c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.16.Final.jar.sha1 @@ -0,0 +1 @@ +3c8ee2c4d4a1cbb947a5c184c7aeb2204260958b \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index 2b21a81d91fa7..d70ed9ea9aa8b 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -73,7 +73,7 @@ public synchronized AmazonS3 client(Settings repositorySettings) { logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, 
clientSettings, repositorySettings); - ClientConfiguration configuration = buildConfiguration(clientSettings, repositorySettings); + ClientConfiguration configuration = buildConfiguration(clientSettings); client = new AmazonS3Client(credentials, configuration); @@ -86,7 +86,7 @@ public synchronized AmazonS3 client(Settings repositorySettings) { } // pkg private for tests - static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings, Settings repositorySettings) { + static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every response to the old generation. diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index f85f2eb6f322f..18c701f5fc1a6 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -95,7 +95,7 @@ private void assertCredentials(Settings singleRepositorySettings, Settings setti } public void testAWSDefaultConfiguration() { - launchAWSConfigurationTest(Settings.EMPTY, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, + launchAWSConfigurationTest(Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, ClientConfiguration.DEFAULT_SOCKET_TIMEOUT); } @@ -110,7 +110,7 @@ public void testAWSConfigurationWithAwsSettings() { .put("s3.client.default.proxy.port", 8080) .put("s3.client.default.read_timeout", "10s") .build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", + launchAWSConfigurationTest(settings, 
Protocol.HTTP, "aws_proxy_host", 8080, "aws_proxy_username", "aws_proxy_password", 3, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 10000); } @@ -118,7 +118,7 @@ public void testRepositoryMaxRetries() { Settings settings = Settings.builder() .put("s3.client.default.max_retries", 5) .build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null, + launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 5, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, 50000); } @@ -126,22 +126,21 @@ public void testRepositoryThrottleRetries() { final boolean throttling = randomBoolean(); Settings settings = Settings.builder().put("s3.client.default.use_throttle_retries", throttling).build(); - launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); + launchAWSConfigurationTest(settings, Protocol.HTTPS, null, -1, null, null, 3, throttling, 50000); } private void launchAWSConfigurationTest(Settings settings, - Settings singleRepositorySettings, - Protocol expectedProtocol, - String expectedProxyHost, - int expectedProxyPort, - String expectedProxyUsername, - String expectedProxyPassword, - Integer expectedMaxRetries, - boolean expectedUseThrottleRetries, - int expectedReadTimeout) { + Protocol expectedProtocol, + String expectedProxyHost, + int expectedProxyPort, + String expectedProxyUsername, + String expectedProxyPassword, + Integer expectedMaxRetries, + boolean expectedUseThrottleRetries, + int expectedReadTimeout) { S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings, singleRepositorySettings); + ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(clientSettings); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); diff --git 
a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash index 4d7e100ba9f9e..eda3038ee93d3 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash @@ -30,7 +30,7 @@ # specific language governing permissions and limitations # under the License. -# Install a plugin an run all the common post installation tests. +# Install a plugin install_plugin() { local name=$1 local path="$2" @@ -52,8 +52,6 @@ install_plugin() { sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install -batch \"file://$path\"" fi - assert_file_exist "$ESPLUGINS/$name" - assert_file_exist "$ESPLUGINS/$name/plugin-descriptor.properties" #check we did not accidentially create a log file as root as /usr/share/elasticsearch assert_file_not_exist "/usr/share/elasticsearch/logs" @@ -66,13 +64,6 @@ install_plugin() { fi } -install_jvm_plugin() { - local name=$1 - local path="$2" - install_plugin $name "$path" $3 - assert_file_exist "$ESPLUGINS/$name/$name"*".jar" -} - # Remove a plugin and make sure its plugin directory is removed. remove_plugin() { local name=$1 @@ -95,7 +86,7 @@ remove_plugin() { # placements for non-site plugins. 
install_jvm_example() { local relativePath=${1:-$(readlink -m jvm-example-*.zip)} - install_jvm_plugin jvm-example "$relativePath" $2 + install_plugin jvm-example "$relativePath" $2 bin_user=$(find "$ESHOME/bin" -maxdepth 0 -printf "%u") bin_owner=$(find "$ESHOME/bin" -maxdepth 0 -printf "%g") @@ -156,9 +147,11 @@ install_and_check_plugin() { local full_name="$prefix-$name" fi - install_jvm_plugin $full_name "$(readlink -m $full_name-*.zip)" + install_plugin $full_name "$(readlink -m $full_name-*.zip)" assert_module_or_plugin_directory "$ESPLUGINS/$full_name" + assert_file_exist "$ESPLUGINS/$full_name/plugin-descriptor.properties" + assert_file_exist "$ESPLUGINS/$full_name/$full_name"*".jar" # analysis plugins have a corresponding analyzers jar if [ $prefix == 'analysis' ]; then @@ -176,6 +169,17 @@ install_and_check_plugin() { done } +# Install a meta plugin +# $1 - the plugin name +# $@ - all remaining arguments are jars that must exist in the plugin's +# installation directory +install_meta_plugin() { + local name=$1 + + install_plugin $name "$(readlink -m $name-*.zip)" + assert_module_or_plugin_directory "$ESPLUGINS/$name" +} + # Compare a list of plugin names to the plugins in the plugins pom and see if they are the same # $1 the file containing the list of plugins we want to compare to # $2 description of the source of the plugin list diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index e094c47ff422b..b8c89517ec119 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -182,8 +182,8 @@ setup: --- "Aggregate After Missing": - skip: - version: " - 6.99.99" - reason: bug fixed in 7.0.0 + version: " - 6.1.99" + reason: bug fixed in 6.2.0 - do: @@ -240,8 +240,8 @@ setup: --- 
"Composite aggregation with format": - skip: - version: " - 6.99.99" - reason: this uses a new option (format) added in 7.0.0 + version: " - 6.2.99" + reason: this uses a new option (format) added in 6.3.0 - do: search: @@ -295,3 +295,31 @@ setup: - length: { aggregations.test.buckets: 1 } - match: { aggregations.test.buckets.0.key.date: "2017-10-21" } - match: { aggregations.test.buckets.0.doc_count: 1 } + +--- +"Composite aggregation with after_key in the response": + - skip: + version: " - 6.2.99" + reason: starting in 6.3.0 after_key is returned in the response + + - do: + search: + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "keyword": { + "terms": { + "field": "keyword", + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 2 } + - length: { aggregations.test.after_key: 1 } + - match: { aggregations.test.after_key.keyword: "foo" } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 2798359d4a848..61be2778845ac 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -35,6 +36,7 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -56,7 +58,8 @@ /** * A request to add/remove aliases for one or more indices. */ -public class IndicesAliasesRequest extends AcknowledgedRequest { +public class IndicesAliasesRequest extends AcknowledgedRequest implements ToXContentObject { + private List allAliasActions = new ArrayList<>(); // indices options that require every specified index to exist, expand wildcards only to open @@ -65,22 +68,37 @@ public class IndicesAliasesRequest extends AcknowledgedRequest parser(String name, Supplier { if (action.indices() != null) { throw new IllegalArgumentException("Only one of [index] and [indices] is supported"); } action.indices(indices); - }), new ParseField("indices")); + }), INDICES); parser.declareString((action, alias) -> { if (action.aliases() != null && action.aliases().length != 0) { throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported"); } action.alias(alias); - }, new ParseField("alias")); + }, ALIAS); parser.declareStringArray(fromList(String.class, (action, aliases) -> { if (action.aliases() != null && action.aliases().length != 0) { throw new IllegalArgumentException("Only one of [alias] and [aliases] is supported"); } action.aliases(aliases); - }), new ParseField("aliases")); + }), ALIASES); return parser; } - private static final ObjectParser ADD_PARSER = parser("add", AliasActions::add); + private static final ObjectParser ADD_PARSER = parser(ADD.getPreferredName(), AliasActions::add); static { ADD_PARSER.declareObject(AliasActions::filter, (parser, m) -> { try { @@ -155,14 +173,15 @@ private static ObjectParser parser(String name, Supplier REMOVE_PARSER = parser("remove", AliasActions::remove); - private static final ObjectParser REMOVE_INDEX_PARSER = parser("remove_index", AliasActions::removeIndex); + private static final ObjectParser REMOVE_PARSER = 
parser(REMOVE.getPreferredName(), AliasActions::remove); + private static final ObjectParser REMOVE_INDEX_PARSER = parser(REMOVE_INDEX.getPreferredName(), + AliasActions::removeIndex); /** * Parser for any one {@link AliasAction}. @@ -183,9 +202,9 @@ private static ObjectParser parser(String name, Supplier parser(String name, Supplier PARSER = new ObjectParser<>("aliases", IndicesAliasesRequest::new); + static { + PARSER.declareObjectArray((request, actions) -> { + for (AliasActions action : actions) { + request.addAliasAction(action); + } + }, AliasActions.PARSER, new ParseField("actions")); + } + + public static IndicesAliasesRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java index 81fb1c1a64f9c..c3cc0b5ebd40b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java @@ -22,16 +22,25 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; /** * A response for a add/remove alias action. 
*/ -public class IndicesAliasesResponse extends AcknowledgedResponse { +public class IndicesAliasesResponse extends AcknowledgedResponse implements ToXContentObject { - IndicesAliasesResponse() { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("indices_aliases", + true, args -> new IndicesAliasesResponse((boolean) args[0])); + static { + declareAcknowledgedField(PARSER); + } + IndicesAliasesResponse() { } IndicesAliasesResponse(boolean acknowledged) { @@ -49,4 +58,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + addAcknowledgedField(builder); + builder.endObject(); + return builder; + } + + public static IndicesAliasesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 46203d369d9e1..6065fcd449b3f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -85,18 +85,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns true if the requisite number of shards were started before - * returning from the index creation operation. If {@link #isAcknowledged()} - * is false, then this also returns false. - * - * @deprecated use {@link #isShardsAcknowledged()} - */ - @Deprecated - public boolean isShardsAcked() { - return shardsAcknowledged; - } - /** * Returns true if the requisite number of shards were started before * returning from the index creation operation. 
If {@link #isAcknowledged()} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 2dcf4f510470f..7e2ec1677740d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -111,18 +111,6 @@ public boolean isAcknowledged() { return acknowledged; } - /** - * Returns true if the requisite number of shards were started in the newly - * created rollover index before returning. If {@link #isAcknowledged()} is - * false, then this will also return false. - * - * @deprecated use {@link #isShardsAcknowledged()} - */ - @Deprecated - public boolean isShardsAcked() { - return shardsAcknowledged; - } - /** * Returns true if the requisite number of shards were started in the newly * created rollover index before returning. 
If {@link #isAcknowledged()} is diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 1180d3b121016..615aaec487538 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.support.master; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ack.AckedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index 3cce3d554f07c..aac8f6af42c9e 100755 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java b/server/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java index 2e9089af79ac9..c7baded410cf8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java +++ b/server/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java @@ -33,14 +33,7 @@ public CreateIndexClusterStateUpdateResponse(boolean acknowledged, 
boolean shard /** * Returns whether the requisite number of shard copies started before the completion of the operation. - * - * @deprecated use {@link #isShardsAcknowledged()} */ - @Deprecated - public boolean isShardsAcked() { - return shardsAcknowledged; - } - public boolean isShardsAcknowledged() { return shardsAcknowledged; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index e2f4d7697b62d..c3c6de5355af4 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -194,6 +194,16 @@ public synchronized void addSettingsUpdateConsumer(Setting setting, Consu addSettingsUpdater(setting.newUpdater(consumer, logger, validator)); } + /** + * Adds a settings consumer that is only executed if any setting in the supplied list of settings is changed. In that case all the + * settings are specified in the argument are returned. + * + * Also automatically adds empty consumers for all settings in order to activate logging + */ + public synchronized void addSettingsUpdateConsumer(Consumer consumer, List> settings) { + addSettingsUpdater(Setting.groupedSettingsUpdater(consumer, logger, settings)); + } + /** * Adds a settings consumer for affix settings. Affix settings have a namespace associated to it that needs to be available to the * consumer in order to be processed correctly. 
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index fd91a8a7601c6..f7f67e424cc8d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -509,10 +509,10 @@ public Tuple getValue(Settings current, Settings previous) { @Override public void apply(Tuple value, Settings current, Settings previous) { if (aSettingUpdater.hasChanged(current, previous)) { - logger.info("updating [{}] from [{}] to [{}]", aSetting.key, aSetting.getRaw(previous), aSetting.getRaw(current)); + logSettingUpdate(aSetting, current, previous, logger); } if (bSettingUpdater.hasChanged(current, previous)) { - logger.info("updating [{}] from [{}] to [{}]", bSetting.key, bSetting.getRaw(previous), bSetting.getRaw(current)); + logSettingUpdate(bSetting, current, previous, logger); } consumer.accept(value.v1(), value.v2()); } @@ -524,6 +524,46 @@ public String toString() { }; } + static AbstractScopedSettings.SettingUpdater groupedSettingsUpdater(Consumer consumer, Logger logger, + final List> configuredSettings) { + + return new AbstractScopedSettings.SettingUpdater() { + + private Settings get(Settings settings) { + return settings.filter(s -> { + for (Setting setting : configuredSettings) { + if (setting.key.match(s)) { + return true; + } + } + return false; + }); + } + + @Override + public boolean hasChanged(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + return currentSettings.equals(previousSettings) == false; + } + + @Override + public Settings getValue(Settings current, Settings previous) { + return get(current); + } + + @Override + public void apply(Settings value, Settings current, Settings previous) { + consumer.accept(value); + } + + @Override + public String toString() { + return "Updater grouped: " + 
configuredSettings.stream().map(Setting::getKey).collect(Collectors.joining(", ")); + } + }; + } + public static class AffixSetting extends Setting { private final AffixKey key; private final Function> delegateFactory; @@ -541,7 +581,7 @@ boolean isGroupSetting() { } private Stream matchStream(Settings settings) { - return settings.keySet().stream().filter((key) -> match(key)).map(settingKey -> key.getConcreteString(settingKey)); + return settings.keySet().stream().filter(this::match).map(key::getConcreteString); } public Set getSettingsDependencies(String settingsKey) { @@ -812,9 +852,7 @@ public Settings getValue(Settings current, Settings previous) { @Override public void apply(Settings value, Settings current, Settings previous) { - if (logger.isInfoEnabled()) { // getRaw can create quite some objects - logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); - } + Setting.logSettingUpdate(GroupSetting.this, current, previous, logger); consumer.accept(value); } @@ -902,7 +940,7 @@ public T getValue(Settings current, Settings previous) { @Override public void apply(T value, Settings current, Settings previous) { - logger.info("updating [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + logSettingUpdate(Setting.this, current, previous, logger); consumer.accept(value); } } @@ -1138,6 +1176,16 @@ private static String arrayToParsableString(List array) { } } + static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { + if (logger.isInfoEnabled()) { + if (setting.isFiltered()) { + logger.info("updating [{}]", setting.key); + } else { + logger.info("updating [{}] from [{}] to [{}]", setting.key, setting.getRaw(previous), setting.getRaw(current)); + } + } + } + public static Setting groupSetting(String key, Property... 
properties) { return groupSetting(key, (s) -> {}, properties); } @@ -1308,8 +1356,8 @@ public static final class AffixKey implements Key { if (suffix == null) { pattern = Pattern.compile("(" + Pattern.quote(prefix) + "((?:[-\\w]+[.])*[-\\w]+$))"); } else { - // the last part of this regexp is for lists since they are represented as x.${namespace}.y.1, x.${namespace}.y.2 - pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." + Pattern.quote(suffix) + ")(?:\\.\\d+)?"); + // the last part of this regexp is to support both list and group keys + pattern = Pattern.compile("(" + Pattern.quote(prefix) + "([-\\w]+)\\." + Pattern.quote(suffix) + ")(?:\\..*)?"); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java index 9c90b3bbde313..9cc5cf7bd8188 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ReleasableLock.java @@ -74,7 +74,7 @@ private boolean removeCurrentThread() { return true; } - public Boolean isHeldByCurrentThread() { + public boolean isHeldByCurrentThread() { if (holdingThreads == null) { throw new UnsupportedOperationException("asserts must be enabled"); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 7feaeb63ac36f..eea63dec94bf2 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -817,6 +817,12 @@ public final boolean refreshNeeded() { // NOTE: do NOT rename this to something containing flush or refresh! public abstract void writeIndexingBuffer() throws EngineException; + /** + * Checks if this engine should be flushed periodically. 
+ * This check is mainly based on the uncommitted translog size and the translog flush threshold setting. + */ + public abstract boolean shouldPeriodicallyFlush(); + /** * Flushes the state of the engine including the transaction log, clearing memory. * diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 97a6403ec3b23..c98b7763d1dbd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1462,6 +1462,31 @@ final boolean tryRenewSyncCommit() { return renewed; } + @Override + public boolean shouldPeriodicallyFlush() { + ensureOpen(); + final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes(); + final long uncommittedSizeOfCurrentCommit = translog.uncommittedSizeInBytes(); + if (uncommittedSizeOfCurrentCommit < flushThreshold) { + return false; + } + /* + * We should only flush ony if the shouldFlush condition can become false after flushing. + * This condition will change if the `uncommittedSize` of the new commit is smaller than + * the `uncommittedSize` of the current commit. This method is to maintain translog only, + * thus the IndexWriter#hasUncommittedChanges condition is not considered. + */ + final long uncommittedSizeOfNewCommit = translog.sizeOfGensAboveSeqNoInBytes(localCheckpointTracker.getCheckpoint() + 1); + /* + * If flushThreshold is too small, we may repeatedly flush even there is no uncommitted operation + * as #sizeOfGensAboveSeqNoInByte and #uncommittedSizeInBytes can return different values. 
+ * An empty translog file has non-zero `uncommittedSize` (the translog header), and method #sizeOfGensAboveSeqNoInBytes can + * return 0 now(no translog gen contains ops above local checkpoint) but method #uncommittedSizeInBytes will return an actual + * non-zero value after rolling a new translog generation. This can be avoided by checking the actual uncommitted operations. + */ + return uncommittedSizeOfNewCommit < uncommittedSizeOfCurrentCommit && translog.uncommittedOperations() > 0; + } + @Override public CommitId flush() throws EngineException { return flush(false, false); @@ -1492,7 +1517,9 @@ public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineExcepti logger.trace("acquired flush lock immediately"); } try { - if (indexWriter.hasUncommittedChanges() || force) { + // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, or (3) the + // newly created commit points to a different translog generation (can free translog) + if (indexWriter.hasUncommittedChanges() || force || shouldPeriodicallyFlush()) { ensureCanFlush(); try { translog.rollGeneration(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 3ace9ededc5b3..711fe68bf6593 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1298,8 +1298,11 @@ public void openIndexAndCreateTranslog(boolean forceNewHistoryUUID, long globalC assert commitInfo.localCheckpoint >= globalCheckpoint : "trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint [" + globalCheckpoint + "]"; - final List existingCommits = DirectoryReader.listCommits(store.directory()); - assert existingCommits.size() == 1 : "Open index create translog should have one commit, commits[" + existingCommits + "]"; + // This assertion is only guaranteed if all 
nodes are on 6.2+. + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_2_0)) { + final List existingCommits = DirectoryReader.listCommits(store.directory()); + assert existingCommits.size() == 1 : "Open index create translog should have one commit, commits[" + existingCommits + "]"; + } } globalCheckpointTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "opening index with a new translog"); innerOpenEngineAndTranslog(EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG, forceNewHistoryUUID); @@ -1597,17 +1600,16 @@ public boolean restoreFromRepository(Repository repository) { } /** - * Tests whether or not the translog should be flushed. This test is based on the current size of the translog comparted to the - * configured flush threshold size. + * Tests whether or not the engine should be flushed periodically. + * This test is based on the current size of the translog compared to the configured flush threshold size. * - * @return {@code true} if the translog should be flushed + * @return {@code true} if the engine should be flushed */ - boolean shouldFlush() { + boolean shouldPeriodicallyFlush() { final Engine engine = getEngineOrNull(); if (engine != null) { try { - final Translog translog = engine.getTranslog(); - return translog.shouldFlush(); + return engine.shouldPeriodicallyFlush(); } catch (final AlreadyClosedException e) { // we are already closed, no need to flush or roll } @@ -2361,7 +2363,7 @@ public Translog.Durability getTranslogDurability() { * executed asynchronously on the flush thread pool. 
*/ public void afterWriteOperation() { - if (shouldFlush() || shouldRollTranslogGeneration()) { + if (shouldPeriodicallyFlush() || shouldRollTranslogGeneration()) { if (flushOrRollRunning.compareAndSet(false, true)) { /* * We have to check again since otherwise there is a race when a thread passes the first check next to another thread which @@ -2371,7 +2373,7 @@ public void afterWriteOperation() { * Additionally, a flush implicitly executes a translog generation roll so if we execute a flush then we do not need to * check if we should roll the translog generation. */ - if (shouldFlush()) { + if (shouldPeriodicallyFlush()) { logger.debug("submitting async flush request"); final AbstractRunnable flush = new AbstractRunnable() { @Override diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index b4bf6173f74cf..3cbc8fc530539 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -436,7 +436,7 @@ private long sizeInBytesByMinGen(long minGeneration) { /** * Returns the size in bytes of the translog files with ops above the given seqNo */ - private long sizeOfGensAboveSeqNoInBytes(long minSeqNo) { + public long sizeOfGensAboveSeqNoInBytes(long minSeqNo) { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); return readersAboveMinSeqNo(minSeqNo).mapToLong(BaseTranslogReader::sizeInBytes).sum(); @@ -523,17 +523,6 @@ public Location add(final Operation operation) throws IOException { } } - /** - * Tests whether or not the translog should be flushed. This test is based on the current size - * of the translog comparted to the configured flush threshold size. 
- * - * @return {@code true} if the translog should be flushed - */ - public boolean shouldFlush() { - final long size = this.uncommittedSizeInBytes(); - return size > this.indexSettings.getFlushThresholdSize().getBytes(); - } - /** * Tests whether or not the translog generation should be rolled to a new generation. This test * is based on the size of the current generation compared to the configured generation diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 8cf4707262ed6..e5442c9a2f43f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.rest.action.admin.indices; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.support.IndicesOptions; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java index b0c8122d4dfa4..faae93803c84b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java @@ -36,14 +36,6 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestIndicesAliasesAction extends BaseRestHandler { - static final ObjectParser PARSER = new ObjectParser<>("aliases"); - static { - PARSER.declareObjectArray((request, actions) -> { - for (AliasActions action: actions) { - request.addAliasAction(action); - } - }, 
AliasActions.PARSER, new ParseField("actions")); - } @Override public String getName() { @@ -61,7 +53,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); try (XContentParser parser = request.contentParser()) { - PARSER.parse(parser, indicesAliasesRequest, null); + IndicesAliasesRequest.PARSER.parse(parser, indicesAliasesRequest, null); } if (indicesAliasesRequest.getAliasActions().isEmpty()) { throw new IllegalArgumentException("No action specified"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregation.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregation.java index 9a22b2e378140..8147f94487f9b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregation.java @@ -52,6 +52,9 @@ static XContentBuilder bucketToXContent(CompositeAggregation.Bucket bucket, } static XContentBuilder toXContentFragment(CompositeAggregation aggregation, XContentBuilder builder, Params params) throws IOException { + if (aggregation.afterKey() != null) { + buildCompositeMap("after_key", aggregation.afterKey(), builder); + } builder.startArray(CommonFields.BUCKETS.getPreferredName()); for (CompositeAggregation.Bucket bucket : aggregation.getBuckets()) { bucketToXContent(bucket, builder, params); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index e822480f9150d..830aba3bcf1e1 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -136,14 +136,15 @@ public InternalAggregation buildAggregation(long zeroBucket) throws IOException int docCount = bucketDocCount(slot); buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs); } - return new InternalComposite(name, size, sourceNames, formats, Arrays.asList(buckets), reverseMuls, + CompositeKey lastBucket = num > 0 ? buckets[num-1].getRawKey() : null; + return new InternalComposite(name, size, sourceNames, formats, Arrays.asList(buckets), lastBucket, reverseMuls, pipelineAggregators(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { final int[] reverseMuls = getReverseMuls(); - return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), reverseMuls, + return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), null, reverseMuls, pipelineAggregators(), metaData()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeKey.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeKey.java index 6f3aacc9f8250..51c5a7c5a887f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeKey.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeKey.java @@ -19,18 +19,38 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; import java.util.Arrays; /** * A key that is composed of multiple {@link Comparable} values. 
*/ -class CompositeKey { +class CompositeKey implements Writeable { private final Comparable[] values; CompositeKey(Comparable... values) { this.values = values; } + CompositeKey(StreamInput in) throws IOException { + values = new Comparable[in.readVInt()]; + for (int i = 0; i < values.length; i++) { + values[i] = (Comparable) in.readGenericValue(); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(values.length); + for (int i = 0; i < values.length; i++) { + out.writeGenericValue(values[i]); + } + } + Comparable[] values() { return values; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 85d172907e013..2e06d7c9fe30b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -74,7 +74,7 @@ public abstract class CompositeValuesSourceBuilder buckets; + private final CompositeKey afterKey; private final int[] reverseMuls; private final List sourceNames; private final List formats; InternalComposite(String name, int size, List sourceNames, List formats, - List buckets, int[] reverseMuls, + List buckets, CompositeKey afterKey, int[] reverseMuls, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.sourceNames = sourceNames; this.formats = formats; this.buckets = buckets; + this.afterKey = afterKey; this.size = size; this.reverseMuls = reverseMuls; } @@ -71,7 +72,7 @@ public InternalComposite(StreamInput in) throws IOException { this.sourceNames = in.readList(StreamInput::readString); this.formats = new ArrayList<>(sourceNames.size()); for (int i = 0; i < sourceNames.size(); i++) { - if 
(in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { formats.add(in.readNamedWriteable(DocValueFormat.class)); } else { formats.add(DocValueFormat.RAW); @@ -79,19 +80,30 @@ public InternalComposite(StreamInput in) throws IOException { } this.reverseMuls = in.readIntArray(); this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls)); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + this.afterKey = in.readBoolean() ? new CompositeKey(in) : null; + } else { + this.afterKey = buckets.size() > 0 ? buckets.get(buckets.size()-1).key : null; + } } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeStringList(sourceNames); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { for (DocValueFormat format : formats) { out.writeNamedWriteable(format); } } out.writeIntArray(reverseMuls); out.writeList(buckets); + if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + out.writeBoolean(afterKey != null); + if (afterKey != null) { + afterKey.writeTo(out); + } + } } @Override @@ -105,8 +117,14 @@ public String getWriteableName() { } @Override - public InternalComposite create(List buckets) { - return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, pipelineAggregators(), getMetaData()); + public InternalComposite create(List newBuckets) { + /** + * This is used by pipeline aggregations to filter/remove buckets so we + * keep the afterKey of the original aggregation in order + * to be able to retrieve the next page even if all buckets have been filtered. + */ + return new InternalComposite(name, size, sourceNames, formats, newBuckets, afterKey, + reverseMuls, pipelineAggregators(), getMetaData()); } @Override @@ -126,7 +144,10 @@ public List getBuckets() { @Override public Map afterKey() { - return buckets.size() > 0 ? 
buckets.get(buckets.size()-1).getKey() : null; + if (afterKey != null) { + return new ArrayMap(sourceNames, formats, afterKey.values()); + } + return null; } // Visible for tests @@ -169,7 +190,8 @@ public InternalAggregation doReduce(List aggregations, Redu reduceContext.consumeBucketsAndMaybeBreak(1); result.add(reduceBucket); } - return new InternalComposite(name, size, sourceNames, formats, result, reverseMuls, pipelineAggregators(), metaData); + final CompositeKey lastKey = result.size() > 0 ? result.get(result.size()-1).getRawKey() : null; + return new InternalComposite(name, size, sourceNames, formats, result, lastKey, reverseMuls, pipelineAggregators(), metaData); } @Override @@ -177,12 +199,13 @@ protected boolean doEquals(Object obj) { InternalComposite that = (InternalComposite) obj; return Objects.equals(size, that.size) && Objects.equals(buckets, that.buckets) && + Objects.equals(afterKey, that.afterKey) && Arrays.equals(reverseMuls, that.reverseMuls); } @Override protected int doHashCode() { - return Objects.hash(size, buckets, Arrays.hashCode(reverseMuls)); + return Objects.hash(size, buckets, afterKey, Arrays.hashCode(reverseMuls)); } private static class BucketIterator implements Comparable { @@ -226,11 +249,7 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke @SuppressWarnings("unchecked") InternalBucket(StreamInput in, List sourceNames, List formats, int[] reverseMuls) throws IOException { - final Comparable[] values = new Comparable[in.readVInt()]; - for (int i = 0; i < values.length; i++) { - values[i] = (Comparable) in.readGenericValue(); - } - this.key = new CompositeKey(values); + this.key = new CompositeKey(in); this.docCount = in.readVLong(); this.aggregations = InternalAggregations.readAggregations(in); this.reverseMuls = reverseMuls; @@ -240,10 +259,7 @@ static class InternalBucket extends InternalMultiBucketAggregation.InternalBucke @Override public void writeTo(StreamOutput out) throws IOException { - 
out.writeVInt(key.size()); - for (int i = 0; i < key.size(); i++) { - out.writeGenericValue(key.get(i)); - } + key.writeTo(out); out.writeVLong(docCount); aggregations.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java index a6c3fd3fb6f08..e7d6f775f1d87 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/ParsedComposite.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -33,15 +34,26 @@ public class ParsedComposite extends ParsedMultiBucketAggregation(ParsedComposite.class.getSimpleName(), true, ParsedComposite::new); static { + PARSER.declareField(ParsedComposite::setAfterKey, (p, c) -> p.mapOrdered(), new ParseField("after_key"), + ObjectParser.ValueType.OBJECT); declareMultiBucketAggregationFields(PARSER, parser -> ParsedComposite.ParsedBucket.fromXContent(parser), parser -> null ); } + private Map afterKey; + public static ParsedComposite fromXContent(XContentParser parser, String name) throws IOException { ParsedComposite aggregation = PARSER.parse(parser, null); aggregation.setName(name); + if (aggregation.afterKey == null && aggregation.getBuckets().size() > 0) { + /** + * Previous versions (< 6.3) don't send afterKey + * in the response so we set it as the last returned buckets. 
+ */ + aggregation.setAfterKey(aggregation.getBuckets().get(aggregation.getBuckets().size()-1).key); + } return aggregation; } @@ -57,9 +69,16 @@ public List getBuckets() { @Override public Map afterKey() { + if (afterKey != null) { + return afterKey; + } return buckets.size() > 0 ? buckets.get(buckets.size()-1).getKey() : null; } + private void setAfterKey(Map afterKey) { + this.afterKey = afterKey; + } + @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { return CompositeAggregation.toXContentFragment(this, builder, params); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java index 4a4aa736332cf..01c2457f96744 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/AliasActionsTests.java @@ -21,8 +21,10 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -30,13 +32,15 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.HashMap; import java.util.Map; import java.util.Objects; +import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomAliasAction; +import static org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomMap; +import static 
org.elasticsearch.index.alias.RandomAliasActionsGenerator.randomRouting; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class AliasActionsTests extends ESTestCase { @@ -58,8 +62,7 @@ public void testEmptyIndex() { Exception e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(randomFrom(AliasActions.Type.values())).index(null)); assertEquals("[index] can't be empty string", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, - () -> new AliasActions(randomFrom(AliasActions.Type.values())).index("")); + e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(randomFrom(AliasActions.Type.values())).index("")); assertEquals("[index] can't be empty string", e.getMessage()); e = expectThrows(IllegalArgumentException.class, () -> new AliasActions(randomFrom(AliasActions.Type.values())).indices((String[]) null)); @@ -110,8 +113,10 @@ public void testParseAdd() throws IOException { Object searchRouting = randomBoolean() ? randomRouting() : null; Object indexRouting = randomBoolean() ? randomBoolean() ? 
searchRouting : randomRouting() : null; XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject("add"); { + b.startObject(); + { + b.startObject("add"); + { if (indices.length > 1 || randomBoolean()) { b.array("indices", indices); } else { @@ -161,8 +166,10 @@ public void testParseAddDefaultRouting() throws IOException { Object searchRouting = randomRouting(); Object indexRouting = randomRouting(); XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject("add"); { + b.startObject(); + { + b.startObject("add"); + { b.field("index", index); b.field("alias", alias); if (randomBoolean()) { @@ -191,8 +198,10 @@ public void testParseRemove() throws IOException { String[] indices = generateRandomStringArray(10, 5, false, false); String[] aliases = generateRandomStringArray(10, 5, false, false); XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject("remove"); { + b.startObject(); + { + b.startObject("remove"); + { if (indices.length > 1 || randomBoolean()) { b.array("indices", indices); } else { @@ -217,10 +226,12 @@ public void testParseRemove() throws IOException { } public void testParseRemoveIndex() throws IOException { - String[] indices = randomBoolean() ? new String[] {randomAlphaOfLength(5)} : generateRandomStringArray(10, 5, false, false); + String[] indices = randomBoolean() ? 
new String[] { randomAlphaOfLength(5) } : generateRandomStringArray(10, 5, false, false); XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject("remove_index"); { + b.startObject(); + { + b.startObject("remove_index"); + { if (indices.length > 1 || randomBoolean()) { b.array("indices", indices); } else { @@ -241,8 +252,10 @@ public void testParseRemoveIndex() throws IOException { public void testParseIndexAndIndicesThrowsError() throws IOException { XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject(randomFrom("add", "remove")); { + b.startObject(); + { + b.startObject(randomFrom("add", "remove")); + { b.field("index", randomAlphaOfLength(5)); b.array("indices", generateRandomStringArray(10, 5, false, false)); b.field("alias", randomAlphaOfLength(5)); @@ -259,8 +272,10 @@ public void testParseIndexAndIndicesThrowsError() throws IOException { public void testParseAliasAndAliasesThrowsError() throws IOException { XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); - b.startObject(); { - b.startObject(randomFrom("add", "remove")); { + b.startObject(); + { + b.startObject(randomFrom("add", "remove")); + { b.field("index", randomAlphaOfLength(5)); b.field("alias", randomAlphaOfLength(5)); b.array("aliases", generateRandomStringArray(10, 5, false, false)); @@ -311,38 +326,17 @@ public void testRoundTrip() throws IOException { } } - - private Map randomMap(int maxDepth) { - int members = between(0, 5); - Map result = new HashMap<>(members); - for (int i = 0; i < members; i++) { - Object value; - switch (between(0, 3)) { - case 0: - if (maxDepth > 0) { - value = randomMap(maxDepth - 1); - } else { - value = randomAlphaOfLength(5); - } - break; - case 1: - value = randomAlphaOfLength(5); - break; - case 2: - value = randomBoolean(); - break; - case 3: - value = randomLong(); - break; - 
default: - throw new UnsupportedOperationException(); + public void testFromToXContent() throws IOException { + for (int runs = 0; runs < 20; runs++) { + AliasActions action = randomAliasAction(); + XContentType xContentType = randomFrom(XContentType.values()); + BytesReference shuffled = toShuffledXContent(action, xContentType, ToXContent.EMPTY_PARAMS, false, "filter"); + AliasActions parsedAction; + try (XContentParser parser = createParser(xContentType.xContent(), shuffled)) { + parsedAction = AliasActions.fromXContent(parser); + assertNull(parser.nextToken()); } - result.put(randomAlphaOfLength(5), value); + assertThat(parsedAction, equalTo(action)); } - return result; - } - - private Object randomRouting() { - return randomBoolean() ? randomAlphaOfLength(5) : randomInt(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index 6bfb78a2ade9c..37cc11da8b7b6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -21,6 +21,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.Version; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -113,7 +114,9 @@ public void testDeltas() { // change an attribute Map attrs = new HashMap<>(node.getAttributes()); attrs.put("new", "new"); - node = new DiscoveryNode(node.getName(), node.getId(), node.getAddress(), attrs, node.getRoles(), node.getVersion()); + final TransportAddress nodeAddress = node.getAddress(); + node = new DiscoveryNode(node.getName(), node.getId(), node.getEphemeralId(), nodeAddress.address().getHostString(), + nodeAddress.getAddress(), nodeAddress, attrs, node.getRoles(), node.getVersion()); } nodesB.add(node); } @@ -140,14 +143,21 @@ public void 
testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); - if (Objects.equals(masterAId, masterBId)) { - assertFalse(delta.masterNodeChanged()); + if (masterA == null) { assertThat(delta.previousMasterNode(), nullValue()); + } else { + assertThat(delta.previousMasterNode().getId(), equalTo(masterAId)); + } + if (masterB == null) { assertThat(delta.newMasterNode(), nullValue()); + } else { + assertThat(delta.newMasterNode().getId(), equalTo(masterBId)); + } + + if (Objects.equals(masterAId, masterBId)) { + assertFalse(delta.masterNodeChanged()); } else { assertTrue(delta.masterNodeChanged()); - assertThat(delta.newMasterNode() != null ? delta.newMasterNode().getId() : null, equalTo(masterBId)); - assertThat(delta.previousMasterNode() != null ? delta.previousMasterNode().getId() : null, equalTo(masterAId)); } Set newNodes = new HashSet<>(nodesB); diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 4a4beb2e0e3ef..180f11730dfed 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -38,6 +38,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -712,4 +713,79 @@ public void testTimeValue() { assertThat(setting.get(Settings.EMPTY).getMillis(), equalTo(random.getMillis() * factor)); } + public void testSettingsGroupUpdater() { + Setting intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); + Setting intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); + AbstractScopedSettings.SettingUpdater updater = 
Setting.groupedSettingsUpdater(s -> {}, logger, + Arrays.asList(intSetting, intSetting2)); + + Settings current = Settings.builder().put("prefix.foo", 123).put("prefix.same", 5555).build(); + Settings previous = Settings.builder().put("prefix.foo", 321).put("prefix.same", 5555).build(); + assertTrue(updater.apply(current, previous)); + } + + public void testSettingsGroupUpdaterRemoval() { + Setting intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); + Setting intSetting2 = Setting.intSetting("prefix.same", 1, Property.NodeScope, Property.Dynamic); + AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, logger, + Arrays.asList(intSetting, intSetting2)); + + Settings current = Settings.builder().put("prefix.same", 5555).build(); + Settings previous = Settings.builder().put("prefix.foo", 321).put("prefix.same", 5555).build(); + assertTrue(updater.apply(current, previous)); + } + + public void testSettingsGroupUpdaterWithAffixSetting() { + Setting intSetting = Setting.intSetting("prefix.foo", 1, Property.NodeScope, Property.Dynamic); + Setting.AffixSetting prefixKeySetting = + Setting.prefixKeySetting("prefix.foo.bar.", key -> Setting.simpleString(key, Property.NodeScope, Property.Dynamic)); + Setting.AffixSetting affixSetting = + Setting.affixKeySetting("prefix.foo.", "suffix", key -> Setting.simpleString(key,Property.NodeScope, Property.Dynamic)); + + AbstractScopedSettings.SettingUpdater updater = Setting.groupedSettingsUpdater(s -> {}, logger, + Arrays.asList(intSetting, prefixKeySetting, affixSetting)); + + Settings.Builder currentSettingsBuilder = Settings.builder() + .put("prefix.foo.bar.baz", "foo") + .put("prefix.foo.infix.suffix", "foo"); + Settings.Builder previousSettingsBuilder = Settings.builder() + .put("prefix.foo.bar.baz", "foo") + .put("prefix.foo.infix.suffix", "foo"); + boolean removePrefixKeySetting = randomBoolean(); + boolean changePrefixKeySetting = randomBoolean(); + boolean 
removeAffixKeySetting = randomBoolean(); + boolean changeAffixKeySetting = randomBoolean(); + boolean removeAffixNamespace = randomBoolean(); + + if (removePrefixKeySetting) { + previousSettingsBuilder.remove("prefix.foo.bar.baz"); + } + if (changePrefixKeySetting) { + currentSettingsBuilder.put("prefix.foo.bar.baz", "bar"); + } + if (removeAffixKeySetting) { + previousSettingsBuilder.remove("prefix.foo.infix.suffix"); + } + if (changeAffixKeySetting) { + currentSettingsBuilder.put("prefix.foo.infix.suffix", "bar"); + } + if (removeAffixKeySetting == false && changeAffixKeySetting == false && removeAffixNamespace) { + currentSettingsBuilder.remove("prefix.foo.infix.suffix"); + currentSettingsBuilder.put("prefix.foo.infix2.suffix", "bar"); + previousSettingsBuilder.put("prefix.foo.infix2.suffix", "bar"); + } + + boolean expectedChange = removeAffixKeySetting || removePrefixKeySetting || changeAffixKeySetting || changePrefixKeySetting + || removeAffixNamespace; + assertThat(updater.apply(currentSettingsBuilder.build(), previousSettingsBuilder.build()), is(expectedChange)); + } + + public void testAffixNamespacesWithGroupSetting() { + final Setting.AffixSetting affixSetting = + Setting.affixKeySetting("prefix.","suffix", + (key) -> Setting.groupSetting(key + ".", Setting.Property.Dynamic, Setting.Property.NodeScope)); + + assertThat(affixSetting.getNamespaces(Settings.builder().put("prefix.infix.suffix", "anything").build()), hasSize(1)); + assertThat(affixSetting.getNamespaces(Settings.builder().put("prefix.infix.suffix.anything", "anything").build()), hasSize(1)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java index 9e6d4be7095f0..dfece2d9d459c 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsFilterTests.java @@ -18,16 
+18,22 @@ */ package org.elasticsearch.common.settings; -import org.elasticsearch.common.Strings; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.logging.ServerLoggers; +import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.rest.FakeRestRequest; import java.io.IOException; import java.util.Arrays; import java.util.HashSet; +import java.util.function.Consumer; import static org.hamcrest.CoreMatchers.equalTo; @@ -100,7 +106,43 @@ public void testSettingsFiltering() throws IOException { .build(), "a.b.*.d" ); + } + + public void testFilteredSettingIsNotLogged() throws Exception { + Settings oldSettings = Settings.builder().put("key", "old").build(); + Settings newSettings = Settings.builder().put("key", "new").build(); + + Setting filteredSetting = Setting.simpleString("key", Property.Filtered); + assertExpectedLogMessages((testLogger) -> Setting.logSettingUpdate(filteredSetting, newSettings, oldSettings, testLogger), + new MockLogAppender.SeenEventExpectation("secure logging", "org.elasticsearch.test", Level.INFO, "updating [key]"), + new MockLogAppender.UnseenEventExpectation("unwanted old setting name", "org.elasticsearch.test", Level.INFO, "*old*"), + new MockLogAppender.UnseenEventExpectation("unwanted new setting name", "org.elasticsearch.test", Level.INFO, "*new*") + ); + } + + public void testRegularSettingUpdateIsFullyLogged() throws Exception { + Settings oldSettings = Settings.builder().put("key", "old").build(); + Settings newSettings = Settings.builder().put("key", "new").build(); + + Setting regularSetting = Setting.simpleString("key"); + 
assertExpectedLogMessages((testLogger) -> Setting.logSettingUpdate(regularSetting, newSettings, oldSettings, testLogger), + new MockLogAppender.SeenEventExpectation("regular logging", "org.elasticsearch.test", Level.INFO, + "updating [key] from [old] to [new]")); + } + private void assertExpectedLogMessages(Consumer consumer, + MockLogAppender.LoggingExpectation ... expectations) throws IllegalAccessException { + Logger testLogger = Loggers.getLogger("org.elasticsearch.test"); + MockLogAppender appender = new MockLogAppender(); + ServerLoggers.addAppender(testLogger, appender); + try { + appender.start(); + Arrays.stream(expectations).forEach(appender::addExpectation); + consumer.accept(testLogger); + appender.assertAllExpectationsMatched(); + } finally { + ServerLoggers.removeAppender(testLogger, appender); + } } private void testFiltering(Settings source, Settings filtered, String... patterns) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 2a7e49aa66b61..d375790a1cc74 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.engine; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -46,6 +47,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.search.IndexSearcher; @@ -163,6 +165,7 @@ import static 
org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -4439,4 +4442,37 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { assertThat(DirectoryReader.listCommits(store.directory()), contains(commits.get(commits.size() - 1))); } } + + public void testShouldPeriodicallyFlush() throws Exception { + assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); + int numDocs = between(10, 100); + for (int id = 0; id < numDocs; id++) { + final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); + engine.index(indexForDoc(doc)); + } + assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); + long flushThreshold = RandomNumbers.randomLongBetween(random(), 100, engine.getTranslog().uncommittedSizeInBytes()); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b")).build(); + indexSettings.updateIndexMetaData(indexMetaData); + engine.onSettingsChanged(); + assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); + engine.flush(); + assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + // Stale operations skipped by Lucene but added to translog - still able to flush + for (int id 
= 0; id < numDocs; id++) { + final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); + final Engine.IndexResult result = engine.index(replicaIndexForDoc(doc, 1L, id, false)); + assertThat(result.isCreated(), equalTo(false)); + } + SegmentInfos lastCommitInfo = engine.getLastCommittedSegmentInfos(); + assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); + engine.flush(false, false); + assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); + assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index e02b6c04a89d3..601eb8e9b1d66 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -73,7 +72,6 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.io.UncheckedIOException; @@ -332,23 +330,23 @@ public void testMaybeFlush() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); 
IndexShard shard = test.getShardOrNull(0); - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(117 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get(); client().prepareIndex("test", "test", "0") .setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get(); - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, SourceToParse.source("test", "test", "1", new BytesArray("{}"), XContentType.JSON), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); - assertTrue(shard.shouldFlush()); + assertTrue(shard.shouldPeriodicallyFlush()); final Translog translog = shard.getEngine().getTranslog(); assertEquals(2, translog.uncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); }); assertEquals(0, translog.uncommittedOperations()); translog.sync(); @@ -364,7 +362,7 @@ public void testMaybeFlush() throws Exception { assertBusy(() -> { // this is async logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), translog.uncommittedOperations(), translog.getGeneration()); - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); }); assertEquals(0, translog.uncommittedOperations()); } @@ -408,7 +406,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService test = indicesService.indexService(resolveIndex("test")); final IndexShard shard = test.getShardOrNull(0); - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); final String key; final boolean flush = randomBoolean(); if (flush) { @@ -423,7 +421,7 @@ public void testStressMaybeFlushOrRollTranslogGeneration() throws Exception { .setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE) .get(); - assertFalse(shard.shouldFlush()); + assertFalse(shard.shouldPeriodicallyFlush()); final AtomicBoolean running = new AtomicBoolean(true); final int numThreads = randomIntBetween(2, 4); final Thread[] threads = new Thread[numThreads]; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 2089c36d06bc0..69176b03942f6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.recovery; +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexWriter; @@ -306,4 +307,30 @@ public void testSequenceBasedRecoveryKeepsTranslog() throws Exception { } } + /** + * This test makes sure that there is no infinite loop of flushing (the condition `shouldPeriodicallyFlush` eventually is false) + * in peer-recovery if a primary sends a fully-baked index commit. 
+ */ + public void testShouldFlushAfterPeerRecovery() throws Exception { + try (ReplicationGroup shards = createGroup(0)) { + shards.startAll(); + int numDocs = shards.indexDocs(between(10, 100)); + final long translogSizeOnPrimary = shards.getPrimary().getTranslog().uncommittedSizeInBytes(); + shards.flush(); + + final IndexShard replica = shards.addReplica(); + IndexMetaData.Builder builder = IndexMetaData.builder(replica.indexSettings().getIndexMetaData()); + long flushThreshold = RandomNumbers.randomLongBetween(random(), 100, translogSizeOnPrimary); + builder.settings(Settings.builder().put(replica.indexSettings().getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b") + ); + replica.indexSettings().updateIndexMetaData(builder.build()); + replica.onSettingsChanged(); + shards.recoverReplica(replica); + // Make sure the flushing will eventually be completed (eg. `shouldPeriodicallyFlush` is false) + assertBusy(() -> assertThat(getEngine(replica).shouldPeriodicallyFlush(), equalTo(false))); + assertThat(replica.getTranslog().totalOperations(), equalTo(numDocs)); + shards.assertAllEqual(numDocs); + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index 0ebf957a8ddd1..094457a8bf4f6 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -129,6 +129,7 @@ public void testWithKeyword() throws Exception { return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=d}", result.afterKey().toString()); assertEquals("{keyword=a}", 
result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); @@ -146,6 +147,7 @@ public void testWithKeyword() throws Exception { .aggregateAfter(Collections.singletonMap("keyword", "a")); }, (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=d}", result.afterKey().toString()); assertEquals("{keyword=c}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); @@ -174,6 +176,7 @@ public void testWithKeywordMissingAfter() throws Exception { return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); }, (result) -> { assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=zoo}", result.afterKey().toString()); assertEquals("{keyword=bar}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=delta}", result.getBuckets().get(1).getKeyAsString()); @@ -193,6 +196,7 @@ public void testWithKeywordMissingAfter() throws Exception { .aggregateAfter(Collections.singletonMap("keyword", "car")); }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=zoo}", result.afterKey().toString()); assertEquals("{keyword=delta}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=foo}", result.getBuckets().get(1).getKeyAsString()); @@ -210,6 +214,7 @@ public void testWithKeywordMissingAfter() throws Exception { .aggregateAfter(Collections.singletonMap("keyword", "mar")); }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=bar}", result.afterKey().toString()); assertEquals("{keyword=foo}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, 
result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=delta}", result.getBuckets().get(1).getKeyAsString()); @@ -240,6 +245,7 @@ public void testWithKeywordDesc() throws Exception { return new CompositeAggregationBuilder("name", Collections.singletonList(terms)); }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=a}", result.afterKey().toString()); assertEquals("{keyword=a}", result.getBuckets().get(2).getKeyAsString()); assertEquals(2L, result.getBuckets().get(2).getDocCount()); assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString()); @@ -258,6 +264,7 @@ public void testWithKeywordDesc() throws Exception { .aggregateAfter(Collections.singletonMap("keyword", "c")); }, (result) -> { + assertEquals("{keyword=a}", result.afterKey().toString()); assertEquals(1, result.getBuckets().size()); assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); @@ -285,6 +293,7 @@ public void testMultiValuedWithKeyword() throws Exception { }, (result) -> { assertEquals(5, result.getBuckets().size()); + assertEquals("{keyword=z}", result.afterKey().toString()); assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=b}", result.getBuckets().get(1).getKeyAsString()); @@ -307,6 +316,7 @@ public void testMultiValuedWithKeyword() throws Exception { }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=z}", result.afterKey().toString()); assertEquals("{keyword=c}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d}", result.getBuckets().get(1).getKeyAsString()); @@ -338,6 +348,7 @@ public void testMultiValuedWithKeywordDesc() throws Exception { }, (result) -> { assertEquals(5,
result.getBuckets().size()); + assertEquals("{keyword=a}", result.afterKey().toString()); assertEquals("{keyword=a}", result.getBuckets().get(4).getKeyAsString()); assertEquals(2L, result.getBuckets().get(4).getDocCount()); assertEquals("{keyword=b}", result.getBuckets().get(3).getKeyAsString()); @@ -361,6 +372,7 @@ public void testMultiValuedWithKeywordDesc() throws Exception { }, (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=a}", result.afterKey().toString()); assertEquals("{keyword=a}", result.getBuckets().get(1).getKeyAsString()); assertEquals(2L, result.getBuckets().get(1).getDocCount()); assertEquals("{keyword=b}", result.getBuckets().get(0).getKeyAsString()); @@ -395,6 +407,7 @@ public void testWithKeywordAndLong() throws Exception { ), (result) -> { assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=d, long=10}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=a, long=100}", result.getBuckets().get(1).getKeyAsString()); @@ -416,6 +429,7 @@ public void testWithKeywordAndLong() throws Exception { ), (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=d, long=10}", result.afterKey().toString()); assertEquals("{keyword=c, long=100}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d, long=10}", result.getBuckets().get(1).getKeyAsString()); @@ -451,6 +465,7 @@ public void testWithKeywordAndLongDesc() throws Exception { ), (result) -> { assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=a, long=0}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(3).getKeyAsString()); assertEquals(1L, result.getBuckets().get(3).getDocCount()); assertEquals("{keyword=a, long=100}", 
result.getBuckets().get(2).getKeyAsString()); @@ -471,6 +486,7 @@ public void testWithKeywordAndLongDesc() throws Exception { )).aggregateAfter(createAfterKey("keyword", "d", "long", 10L) ), (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{keyword=a, long=0}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(2).getKeyAsString()); assertEquals(1L, result.getBuckets().get(2).getDocCount()); assertEquals("{keyword=a, long=100}", result.getBuckets().get(1).getKeyAsString()); @@ -503,6 +519,7 @@ public void testMultiValuedWithKeywordAndLong() throws Exception { )) , (result) -> { assertEquals(10, result.getBuckets().size()); + assertEquals("{keyword=z, long=0}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=a, long=100}", result.getBuckets().get(1).getKeyAsString()); @@ -536,6 +553,7 @@ public void testMultiValuedWithKeywordAndLong() throws Exception { ).aggregateAfter(createAfterKey("keyword", "c", "long", 10L)) , (result) -> { assertEquals(6, result.getBuckets().size()); + assertEquals("{keyword=z, long=100}", result.afterKey().toString()); assertEquals("{keyword=c, long=100}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d, long=10}", result.getBuckets().get(1).getKeyAsString()); @@ -577,6 +595,7 @@ public void testMultiValuedWithKeywordAndLongDesc() throws Exception { ), (result) -> { assertEquals(10, result.getBuckets().size()); + assertEquals("{keyword=a, long=0}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(9).getKeyAsString()); assertEquals(1L, result.getBuckets().get(9).getDocCount()); assertEquals("{keyword=a, long=100}", result.getBuckets().get(8).getKeyAsString()); @@ -611,6 +630,7 @@ public void 
testMultiValuedWithKeywordAndLongDesc() throws Exception { ), (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{keyword=a, long=0}", result.afterKey().toString()); assertEquals("{keyword=a, long=0}", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); assertEquals("{keyword=a, long=100}", result.getBuckets().get(0).getKeyAsString()); @@ -644,6 +664,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ) , (result) -> { assertEquals(10, result.getBuckets().size()); + assertEquals("{keyword=c, long=100, double=0.4}", result.afterKey().toString()); assertEquals("{keyword=a, long=0, double=0.09}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); assertEquals("{keyword=a, long=0, double=0.4}", result.getBuckets().get(1).getKeyAsString()); @@ -678,6 +699,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ).aggregateAfter(createAfterKey("keyword", "a", "long", 100L, "double", 0.4d)) ,(result) -> { assertEquals(10, result.getBuckets().size()); + assertEquals("{keyword=z, long=0, double=0.09}", result.afterKey().toString()); assertEquals("{keyword=b, long=100, double=0.4}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=c, long=0, double=0.09}", result.getBuckets().get(1).getKeyAsString()); @@ -712,6 +734,7 @@ public void testMultiValuedWithKeywordLongAndDouble() throws Exception { ).aggregateAfter(createAfterKey("keyword", "z", "long", 100L, "double", 0.4d)) , (result) -> { assertEquals(0, result.getBuckets().size()); + assertNull(result.afterKey()); } ); } @@ -738,6 +761,7 @@ public void testWithDateHistogram() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); assertEquals("{date=1474329600000}", 
result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); @@ -757,6 +781,7 @@ public void testWithDateHistogram() throws IOException { }, (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); assertEquals("{date=1508371200000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508457600000}", result.getBuckets().get(1).getKeyAsString()); @@ -788,6 +813,7 @@ public void testWithDateHistogramAndFormat() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{date=2017-10-20}", result.afterKey().toString()); assertEquals("{date=2016-09-20}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=2017-10-19}", result.getBuckets().get(1).getKeyAsString()); @@ -808,6 +834,7 @@ public void testWithDateHistogramAndFormat() throws IOException { }, (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{date=2017-10-20}", result.afterKey().toString()); assertEquals("{date=2017-10-19}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=2017-10-20}", result.getBuckets().get(1).getKeyAsString()); @@ -871,6 +898,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508454000000}", result.afterKey().toString()); assertEquals("{date=1474326000000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508367600000}", result.getBuckets().get(1).getKeyAsString()); @@ -891,6 +919,7 @@ public void 
testWithDateHistogramAndTimeZone() throws IOException { }, (result) -> { assertEquals(2, result.getBuckets().size()); + assertEquals("{date=1508454000000}", result.afterKey().toString()); assertEquals("{date=1508367600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508454000000}", result.getBuckets().get(1).getKeyAsString()); @@ -924,6 +953,7 @@ public void testWithDateHistogramAndKeyword() throws IOException { ), (result) -> { assertEquals(7, result.getBuckets().size()); + assertEquals("{date=1508457600000, keyword=d}", result.afterKey().toString()); assertEquals("{date=1474329600000, keyword=b}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1474329600000, keyword=c}", result.getBuckets().get(1).getKeyAsString()); @@ -954,6 +984,7 @@ public void testWithDateHistogramAndKeyword() throws IOException { ).aggregateAfter(createAfterKey("date", 1508371200000L, "keyword", "g")) , (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{date=1508457600000, keyword=d}", result.afterKey().toString()); assertEquals("{date=1508457600000, keyword=a}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{date=1508457600000, keyword=c}", result.getBuckets().get(1).getKeyAsString()); @@ -986,6 +1017,7 @@ public void testWithKeywordAndHistogram() throws IOException { ) , (result) -> { assertEquals(7, result.getBuckets().size()); + assertEquals("{keyword=z, price=50.0}", result.afterKey().toString()); assertEquals("{keyword=a, price=100.0}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=b, price=50.0}", result.getBuckets().get(1).getKeyAsString()); @@ -1013,6 +1045,7 @@ public void testWithKeywordAndHistogram() throws IOException { 
).aggregateAfter(createAfterKey("keyword", "c", "price", 50.0)) , (result) -> { assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=z, price=50.0}", result.afterKey().toString()); assertEquals("{keyword=c, price=100.0}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d, price=100.0}", result.getBuckets().get(1).getKeyAsString()); @@ -1052,6 +1085,7 @@ public void testWithHistogramAndKeyword() throws IOException { ) , (result) -> { assertEquals(8, result.getBuckets().size()); + assertEquals("{histo=0.9, keyword=d}", result.afterKey().toString()); assertEquals("{histo=0.4, keyword=a}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{histo=0.4, keyword=b}", result.getBuckets().get(1).getKeyAsString()); @@ -1081,6 +1115,7 @@ public void testWithHistogramAndKeyword() throws IOException { ).aggregateAfter(createAfterKey("histo", 0.8d, "keyword", "b")) , (result) -> { assertEquals(3, result.getBuckets().size()); + assertEquals("{histo=0.9, keyword=d}", result.afterKey().toString()); assertEquals("{histo=0.8, keyword=z}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{histo=0.9, keyword=a}", result.getBuckets().get(1).getKeyAsString()); @@ -1114,6 +1149,7 @@ public void testWithKeywordAndDateHistogram() throws IOException { ) , (result) -> { assertEquals(7, result.getBuckets().size()); + assertEquals("{keyword=z, date_histo=1474329600000}", result.afterKey().toString()); assertEquals("{keyword=a, date_histo=1508457600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=b, date_histo=1474329600000}", result.getBuckets().get(1).getKeyAsString()); @@ -1142,6 +1178,7 @@ public void testWithKeywordAndDateHistogram() throws IOException { 
).aggregateAfter(createAfterKey("keyword","c", "date_histo", 1474329600000L)) , (result) -> { assertEquals(4, result.getBuckets().size()); + assertEquals("{keyword=z, date_histo=1474329600000}", result.afterKey().toString()); assertEquals("{keyword=c, date_histo=1508457600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("{keyword=d, date_histo=1508457600000}", result.getBuckets().get(1).getKeyAsString()); @@ -1307,7 +1344,6 @@ private void addToDocument(Document doc, Map> keys) { } } - @SuppressWarnings("unchecked") private static Map createAfterKey(Object... fields) { assert fields.length % 2 == 0; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index 322b70cb2d971..022f5e6abc13c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -161,7 +161,9 @@ protected InternalComposite createTestInstance(String name, List o1.compareKey(o2)); - return new InternalComposite(name, size, sourceNames, formats, buckets, reverseMuls, Collections.emptyList(), metaData); + CompositeKey lastBucket = buckets.size() > 0 ? buckets.get(buckets.size()-1).getRawKey() : null; + return new InternalComposite(name, size, sourceNames, formats, buckets, lastBucket, reverseMuls, + Collections.emptyList(), metaData); } @Override @@ -195,7 +197,8 @@ protected InternalComposite mutateInstance(InternalComposite instance) throws IO default: throw new AssertionError("illegal branch"); } - return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, formats, buckets, reverseMuls, + CompositeKey lastBucket = buckets.size() > 0 ? 
buckets.get(buckets.size()-1).getRawKey() : null; + return new InternalComposite(instance.getName(), instance.getSize(), sourceNames, formats, buckets, lastBucket, reverseMuls, instance.pipelineAggregators(), metaData); } diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 9eeca0bd12d05..ad7002436c70e 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -21,10 +21,11 @@ import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.SeedUtils; + import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.settings.Settings; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -32,6 +33,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; @@ -59,8 +61,17 @@ public static void ensureAllArraysAreReleased() throws Exception { masterCopy.keySet().retainAll(ACQUIRED_ARRAYS.keySet()); ACQUIRED_ARRAYS.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on if (!masterCopy.isEmpty()) { - final Object cause = masterCopy.entrySet().iterator().next().getValue(); - throw new RuntimeException(masterCopy.size() + " arrays have not been released", cause instanceof Throwable ? 
(Throwable) cause : null); + Iterator causes = masterCopy.values().iterator(); + Object firstCause = causes.next(); + RuntimeException exception = new RuntimeException(masterCopy.size() + " arrays have not been released", + firstCause instanceof Throwable ? (Throwable) firstCause : null); + while (causes.hasNext()) { + Object cause = causes.next(); + if (cause instanceof Throwable) { + exception.addSuppressed((Throwable) cause); + } + } + throw exception; } } } @@ -249,7 +260,9 @@ private abstract static class AbstractArrayWrapper { AbstractArrayWrapper(boolean clearOnResize) { this.clearOnResize = clearOnResize; this.originalRelease = new AtomicReference<>(); - ACQUIRED_ARRAYS.put(this, TRACK_ALLOCATIONS ? new RuntimeException() : Boolean.TRUE); + ACQUIRED_ARRAYS.put(this, + TRACK_ALLOCATIONS ? new RuntimeException("Unreleased array from test: " + LuceneTestCase.getTestClass().getName()) + : Boolean.TRUE); } protected abstract BigArray getDelegate(); diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java index 5fcf2f11d0ed0..c202688892963 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockPageCacheRecycler.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.util; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.recycler.Recycler.V; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -27,6 +28,7 @@ import java.lang.reflect.Array; import java.util.Arrays; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Random; import java.util.concurrent.ConcurrentHashMap; @@ -48,8 +50,13 @@ public static void ensureAllPagesAreReleased() throws Exception { masterCopy.keySet().retainAll(ACQUIRED_PAGES.keySet()); 
ACQUIRED_PAGES.keySet().removeAll(masterCopy.keySet()); // remove all existing master copy we will report on if (!masterCopy.isEmpty()) { - final Throwable t = masterCopy.entrySet().iterator().next().getValue(); - throw new RuntimeException(masterCopy.size() + " pages have not been released", t); + Iterator causes = masterCopy.values().iterator(); + Throwable firstCause = causes.next(); + RuntimeException exception = new RuntimeException(masterCopy.size() + " pages have not been released", firstCause); + while (causes.hasNext()) { + exception.addSuppressed(causes.next()); + } + throw exception; } } } @@ -66,7 +73,7 @@ public MockPageCacheRecycler(Settings settings) { } private V wrap(final V v) { - ACQUIRED_PAGES.put(v, new Throwable()); + ACQUIRED_PAGES.put(v, new Throwable("Unreleased Page from test: " + LuceneTestCase.getTestClass().getName())); return new V() { @Override diff --git a/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java b/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java new file mode 100644 index 0000000000000..7a8355c05f91d --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/alias/RandomAliasActionsGenerator.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.alias; + +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; + +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.test.ESTestCase.between; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; +import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomInt; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; +import static org.elasticsearch.test.ESTestCase.randomLong; + +public class RandomAliasActionsGenerator { + public static AliasActions randomAliasAction() { + return randomAliasAction(false); + } + + public static AliasActions randomAliasAction(boolean useStringAsFilter) { + AliasActions action = new AliasActions(randomFrom(AliasActions.Type.values())); + if (randomBoolean()) { + action.index(randomAlphaOfLength(5)); + } else { + int numIndices = randomIntBetween(1, 5); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + action.indices(indices); + } + if (action.actionType() != AliasActions.Type.REMOVE_INDEX) { + if (randomBoolean()) { + action.alias(randomAlphaOfLength(5)); + } else { + int numAliases = randomIntBetween(1, 5); + String[] aliases = new String[numAliases]; + for (int i = 0; i < numAliases; i++) { + aliases[i] = "alias-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + action.aliases(aliases); + } + } + if (action.actionType() == AliasActions.Type.ADD) { + if (randomBoolean()) { + if (useStringAsFilter) { + 
action.filter(randomAlphaOfLength(5)); + } else { + action.filter(randomMap(randomInt(5))); + } + } + if (randomBoolean()) { + if (randomBoolean()) { + action.routing(randomRouting().toString()); + } else { + action.searchRouting(randomRouting().toString()); + action.indexRouting(randomRouting().toString()); + } + } + } + return action; + } + + public static Map randomMap(int maxDepth) { + int members = between(0, 5); + Map result = new HashMap<>(members); + for (int i = 0; i < members; i++) { + Object value; + switch (between(0, 3)) { + case 0: + if (maxDepth > 0) { + value = randomMap(maxDepth - 1); + } else { + value = randomAlphaOfLength(5); + } + break; + case 1: + value = randomAlphaOfLength(5); + break; + case 2: + value = randomBoolean(); + break; + case 3: + value = randomLong(); + break; + default: + throw new UnsupportedOperationException(); + } + result.put(randomAlphaOfLength(5), value); + } + return result; + } + + public static Object randomRouting() { + return randomBoolean() ? randomAlphaOfLength(5) : randomInt(); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java index b35dc9563ce5c..6e5f919f33fdf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockLogAppender.java @@ -92,7 +92,7 @@ public void match(LogEvent event) { saw = true; } } else { - if (event.getMessage().toString().contains(message)) { + if (event.getMessage().getFormattedMessage().contains(message)) { saw = true; } }