-
Notifications
You must be signed in to change notification settings - Fork 25k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Convert RestGetMappingAction to chunked encoding #89906
Merged
original-brownbear
merged 5 commits into
elastic:main
from
original-brownbear:chunked-encoding-get-mappings
Sep 9, 2022
Merged
Changes from 1 commit
Commits
Show all changes
5 commits
Select commit
Hold shift + click to select a range
ebddfc1
Convert RestGetMappingAction to chunked encoding
original-brownbear 8d2225b
Merge remote-tracking branch 'elastic/main' into chunked-encoding-get…
original-brownbear f40e16d
remove needlessly complicated test
original-brownbear e115afd
Merge remote-tracking branch 'elastic/main' into chunked-encoding-get…
original-brownbear b6517df
assert chunk count
original-brownbear File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
73 changes: 73 additions & 0 deletions
73
qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestGetMappingsIT.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,73 @@ | ||
/* | ||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one | ||
* or more contributor license agreements. Licensed under the Elastic License | ||
* 2.0 and the Server Side Public License, v 1; you may not use this file except | ||
* in compliance with, at your election, the Elastic License 2.0 or the Server | ||
* Side Public License, v 1. | ||
*/ | ||
package org.elasticsearch.http; | ||
|
||
import org.apache.http.client.methods.HttpGet; | ||
import org.apache.lucene.tests.util.LuceneTestCase; | ||
import org.elasticsearch.action.ActionListener; | ||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; | ||
import org.elasticsearch.action.support.GroupedActionListener; | ||
import org.elasticsearch.action.support.PlainActionFuture; | ||
import org.elasticsearch.client.Request; | ||
import org.elasticsearch.index.mapper.MapperService; | ||
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; | ||
import org.elasticsearch.xcontent.XContentBuilder; | ||
import org.elasticsearch.xcontent.XContentParser; | ||
import org.elasticsearch.xcontent.XContentParserConfiguration; | ||
import org.elasticsearch.xcontent.json.JsonXContent; | ||
import org.hamcrest.Matchers; | ||
|
||
import java.io.InputStream; | ||
import java.util.List; | ||
import java.util.Map; | ||
|
||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; | ||
import static org.hamcrest.Matchers.aMapWithSize; | ||
|
||
@LuceneTestCase.SuppressFileSystems(value = "HandleLimitFS") // we sometimes have >2048 open files | ||
public class RestGetMappingsIT extends HttpSmokeTestCase { | ||
|
||
public void testGetLargeMappingsResponse() throws Exception { | ||
final XContentBuilder builder = JsonXContent.contentBuilder() | ||
.startObject() | ||
.startObject(MapperService.SINGLE_MAPPING_NAME) | ||
.startObject("properties"); | ||
final int fields = randomIntBetween(500, 1000); | ||
for (int i = 0; i < fields; i++) { | ||
builder.startObject("loooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong-named-field-" + i) | ||
.field("type", "text") | ||
.field("store", true) | ||
.endObject(); | ||
} | ||
builder.endObject().endObject().endObject(); | ||
assertAcked(admin().indices().preparePutTemplate("large-mapping").setPatterns(List.of("test-idx-*")).setMapping(builder).get()); | ||
final int indexCount = randomIntBetween(50, 150); | ||
final PlainActionFuture<Void> f = PlainActionFuture.newFuture(); | ||
final ActionListener<CreateIndexResponse> listener = new GroupedActionListener<>(f.map(l -> null), indexCount); | ||
for (int i = 0; i < indexCount; i++) { | ||
admin().indices() | ||
.prepareCreate("test-idx-" + i) | ||
.setSettings(AbstractSnapshotIntegTestCase.SINGLE_SHARD_NO_REPLICA) | ||
.execute(listener); | ||
} | ||
f.get(); | ||
final var response = getRestClient().performRequest(new Request(HttpGet.METHOD_NAME, "/_mappings")); | ||
try ( | ||
InputStream input = response.getEntity().getContent(); | ||
XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, input) | ||
) { | ||
final Map<String, Object> mappings = parser.map(); | ||
assertThat(mappings, Matchers.aMapWithSize(indexCount)); | ||
final Object idx0 = mappings.get("test-idx-0"); | ||
@SuppressWarnings("unchecked") | ||
var properties = ((Map<String, Map<String, Object>>) idx0).get("mappings").get("properties"); | ||
assertThat((Map<?, ?>) properties, aMapWithSize(fields)); | ||
mappings.forEach((key, value) -> assertEquals(key, idx0, value)); | ||
} | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -8,27 +8,26 @@ | |
|
||
package org.elasticsearch.rest.action.admin.indices; | ||
|
||
import org.elasticsearch.ElasticsearchTimeoutException; | ||
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; | ||
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; | ||
import org.elasticsearch.action.support.IndicesOptions; | ||
import org.elasticsearch.client.internal.node.NodeClient; | ||
import org.elasticsearch.common.Strings; | ||
import org.elasticsearch.common.collect.Iterators; | ||
import org.elasticsearch.common.logging.DeprecationLogger; | ||
import org.elasticsearch.core.RestApiVersion; | ||
import org.elasticsearch.core.TimeValue; | ||
import org.elasticsearch.http.HttpChannel; | ||
import org.elasticsearch.rest.BaseRestHandler; | ||
import org.elasticsearch.rest.ChunkedRestResponseBody; | ||
import org.elasticsearch.rest.RestRequest; | ||
import org.elasticsearch.rest.action.DispatchingRestToXContentListener; | ||
import org.elasticsearch.rest.RestResponse; | ||
import org.elasticsearch.rest.RestStatus; | ||
import org.elasticsearch.rest.action.RestActionListener; | ||
import org.elasticsearch.rest.action.RestCancellableNodeClient; | ||
import org.elasticsearch.threadpool.ThreadPool; | ||
import org.elasticsearch.xcontent.ToXContentObject; | ||
import org.elasticsearch.xcontent.XContentBuilder; | ||
|
||
import java.io.IOException; | ||
import java.util.List; | ||
import java.util.function.LongSupplier; | ||
|
||
import static org.elasticsearch.rest.RestRequest.Method.GET; | ||
import static org.elasticsearch.rest.RestRequest.Method.HEAD; | ||
|
@@ -40,11 +39,7 @@ public class RestGetMappingAction extends BaseRestHandler { | |
public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in get mapping request is deprecated. " | ||
+ "Use typeless api instead"; | ||
|
||
private final ThreadPool threadPool; | ||
|
||
public RestGetMappingAction(ThreadPool threadPool) { | ||
this.threadPool = threadPool; | ||
} | ||
public RestGetMappingAction() {} | ||
|
||
@Override | ||
public List<Route> routes() { | ||
|
@@ -97,37 +92,25 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC | |
final HttpChannel httpChannel = request.getHttpChannel(); | ||
return channel -> new RestCancellableNodeClient(client, httpChannel).admin() | ||
.indices() | ||
.getMappings( | ||
getMappingsRequest, | ||
new DispatchingRestToXContentListener<>(threadPool.executor(ThreadPool.Names.MANAGEMENT), channel, request).map( | ||
getMappingsResponse -> new RestGetMappingsResponse(getMappingsResponse, threadPool::relativeTimeInMillis, timeout) | ||
) | ||
); | ||
} | ||
|
||
private static final class RestGetMappingsResponse implements ToXContentObject { | ||
private final GetMappingsResponse response; | ||
private final LongSupplier relativeTimeSupplierMillis; | ||
private final TimeValue timeout; | ||
private final long startTimeMs; | ||
|
||
private RestGetMappingsResponse(GetMappingsResponse response, LongSupplier relativeTimeSupplierMillis, TimeValue timeout) { | ||
this.response = response; | ||
this.relativeTimeSupplierMillis = relativeTimeSupplierMillis; | ||
this.timeout = timeout; | ||
this.startTimeMs = relativeTimeSupplierMillis.getAsLong(); | ||
} | ||
|
||
@Override | ||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { | ||
if (relativeTimeSupplierMillis.getAsLong() - startTimeMs > timeout.millis()) { | ||
throw new ElasticsearchTimeoutException("Timed out getting mappings"); | ||
} | ||
|
||
builder.startObject(); | ||
response.toXContent(builder, params); | ||
builder.endObject(); | ||
return builder; | ||
} | ||
.getMappings(getMappingsRequest, new RestActionListener<>(channel) { | ||
@Override | ||
protected void processResponse(GetMappingsResponse getMappingsResponse) throws Exception { | ||
ensureOpen(); | ||
channel.sendResponse( | ||
new RestResponse( | ||
RestStatus.OK, | ||
ChunkedRestResponseBody.fromXContent( | ||
() -> Iterators.concat( | ||
Iterators.single((b, p) -> b.startObject()), | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
I'm not quite sure why the … [reviewer comment truncated in extraction] |
||
getMappingsResponse.toXContentChunked(), | ||
Iterators.single((b, p) -> b.endObject()) | ||
), | ||
request, | ||
channel | ||
) | ||
) | ||
); | ||
} | ||
}); | ||
} | ||
} |
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Hmm I think if we approach testing chunked encoding like this across all APIs we'll end up adding a lot of costly tests. Could we instead reduce the chunk size in these smoke tests to force chunking at a more reasonable scale?
Also I think this test passes even without the production code changes. Can we assert that the REST response really did arrive in multiple chunks?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Hmm now that I think about it more, maybe this test is pointless. I really wanted to have a test that would actually cause a round of chunked encoding where the channel becomes not-writable at least once. But now that I think about it again, that's a really redundant test probably ... I'll just remove this and rely on the fact that this is tested by the existing REST tests (that will now see a chunked response) and our Netty specific tests around flushing messages. I think that's good enough here. Sorry for the noise
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Maybe I'm confused, but I was expecting that we'd send the response in chunks regardless of writability. I think it's worth checking that, but
MAX_BYTES_PER_WRITE = 1 << 18
is just a bit big for a test.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yea, the response is always chunked, but the fact that it serializes fine is already tested elsewhere. Adding a test that verifies that we actually get chunked bytes back seems quite redundant, doesn't it? I mean, unless someone wilfully changes the code to revert back to a normal response (which I'll make very hard shortly, PR incoming :)), this doesn't guard against anything, does it?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Ehh maybe I'm paranoid but I could imagine some future refactoring which accidentally puts all the objects into a single chunk, quietly destroying the memory-efficiency of this API.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
That makes sense :) How about b6517df to make sure that won't happen?