…smarter_field_caps_subscribableListener
astefan committed Nov 26, 2024
2 parents 8d55211 + fadc752 commit f4fcfd3
Showing 10 changed files with 264 additions and 92 deletions.
8 changes: 8 additions & 0 deletions muted-tests.yml
@@ -232,6 +232,14 @@ tests:
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=transform/transforms_reset/Test reset running transform}
issue: https://github.com/elastic/elasticsearch/issues/117473
- class: org.elasticsearch.xpack.esql.qa.single_node.FieldExtractorIT
method: testConstantKeywordField
issue: https://github.com/elastic/elasticsearch/issues/117524
- class: org.elasticsearch.xpack.esql.qa.multi_node.FieldExtractorIT
method: testConstantKeywordField
issue: https://github.com/elastic/elasticsearch/issues/117524
- class: org.elasticsearch.repositories.s3.RepositoryS3EcsClientYamlTestSuiteIT
issue: https://github.com/elastic/elasticsearch/issues/117525

# Examples:
#
@@ -131,6 +131,7 @@ private static Version parseUnchecked(String version) {
public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0);
public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT_BACKPORT = def(8_519_00_0, Version.LUCENE_9_12_0);
public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID_BACKPORT = def(8_520_00_0, Version.LUCENE_9_12_0);
public static final IndexVersion V8_DEPRECATE_SOURCE_MODE_MAPPER = def(8_521_00_0, Version.LUCENE_9_12_0);
public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0);
public static final IndexVersion LOGSDB_DEFAULT_IGNORE_DYNAMIC_BEYOND_LIMIT = def(9_001_00_0, Version.LUCENE_10_0_0);
public static final IndexVersion TIME_BASED_K_ORDERED_DOC_ID = def(9_002_00_0, Version.LUCENE_10_0_0);
@@ -484,8 +484,7 @@ public boolean isStored() {
}

public static boolean onOrAfterDeprecateModeVersion(IndexVersion version) {
return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER);
// Adjust versions after backporting.
// || version.between(IndexVersions.BACKPORT_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0);
return version.onOrAfter(IndexVersions.DEPRECATE_SOURCE_MODE_MAPPER)
|| version.between(IndexVersions.V8_DEPRECATE_SOURCE_MODE_MAPPER, IndexVersions.UPGRADE_TO_LUCENE_10_0_0);
}
}
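
A hedged illustration of how this check might be consumed: the sketch below gates a deprecation warning on it. The logger, deprecation key, message, and settings accessor are placeholders rather than the mapper's actual code; only the version logic mirrors the method above.

// Hypothetical caller: warn only for indices created inside the deprecation window.
// V8_DEPRECATE_SOURCE_MODE_MAPPER (8_521_00_0) sorts below UPGRADE_TO_LUCENE_10_0_0 (9_000_00_0),
// so between(...) matches 8.x indices created on or after the backport, while onOrAfter(...)
// matches 9.x indices from DEPRECATE_SOURCE_MODE_MAPPER onwards.
if (onOrAfterDeprecateModeVersion(indexSettings.getIndexVersionCreated())) {
    deprecationLogger.warn(
        DeprecationCategory.MAPPINGS,
        "mapper_source_mode",                                                        // placeholder key
        "configuring the [mode] of the [_source] field in mappings is deprecated"    // placeholder message
    );
}
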
@@ -47,7 +47,7 @@
/**
* {@link ExchangeService} is responsible for exchanging pages between exchange sinks and sources on the same or different nodes.
* It holds a map of {@link ExchangeSinkHandler} instances for each node in the cluster to serve {@link ExchangeRequest}s
* To connect exchange sources to exchange sinks, use the {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, int)} method.
* To connect exchange sources to exchange sinks, use {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, boolean, int, ActionListener)}.
*/
public final class ExchangeService extends AbstractLifecycleComponent {
// TODO: Make this a child action of the data node transport to ensure that exchanges
@@ -311,7 +311,7 @@ static final class TransportRemoteSink implements RemoteSink {

@Override
public void fetchPageAsync(boolean allSourcesFinished, ActionListener<ExchangeResponse> listener) {
final long reservedBytes = estimatedPageSizeInBytes.get();
final long reservedBytes = allSourcesFinished ? 0 : estimatedPageSizeInBytes.get();
if (reservedBytes > 0) {
// This doesn't fully protect ESQL from OOM, but reduces the likelihood.
blockFactory.breaker().addEstimateBytesAndMaybeBreak(reservedBytes, "fetch page");
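
A condensed view of the reservation logic after this change, with the assumed rationale spelled out; the two statements come from the diff, the explanatory comments are an inference.

// When allSourcesFinished is true the request is only a termination signal to the sink and no
// page is expected back, so there is (presumably) nothing worth reserving on the breaker.
final long reservedBytes = allSourcesFinished ? 0 : estimatedPageSizeInBytes.get();
if (reservedBytes > 0) {
    // Reserving the estimate up front trips the breaker on oversized pages instead of risking an OOM.
    blockFactory.breaker().addEstimateBytesAndMaybeBreak(reservedBytes, "fetch page");
}
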
@@ -93,7 +93,7 @@ public IsBlockedResult waitForWriting() {
* @param sourceFinished if true, then this handler can finish as sources have enough pages.
* @param listener the listener that will be notified when pages are ready or this handler is finished
* @see RemoteSink
* @see ExchangeSourceHandler#addRemoteSink(RemoteSink, int)
* @see ExchangeSourceHandler#addRemoteSink(RemoteSink, boolean, int, ActionListener)
*/
public void fetchPageAsync(boolean sourceFinished, ActionListener<ExchangeResponse> listener) {
if (sourceFinished) {
@@ -24,24 +24,54 @@
/**
* An {@link ExchangeSourceHandler} asynchronously fetches pages and status from multiple {@link RemoteSink}s
* and feeds them to its {@link ExchangeSource}, which are created using the {@link #createExchangeSource()}) method.
* {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, int)}) method.
* {@link RemoteSink}s are added using the {@link #addRemoteSink(RemoteSink, boolean, int, ActionListener)}) method.
*
* @see #createExchangeSource()
* @see #addRemoteSink(RemoteSink, int)
* @see #addRemoteSink(RemoteSink, boolean, int, ActionListener)
*/
public final class ExchangeSourceHandler {
private final ExchangeBuffer buffer;
private final Executor fetchExecutor;

private final PendingInstances outstandingSinks;
private final PendingInstances outstandingSources;
// Collect failures that occur while fetching pages from the remote sink with `failFast=true`.
// The exchange source will stop fetching and abort as soon as any failure is added to this failure collector.
// The final failure collected will be notified to callers via the {@code completionListener}.
private final FailureCollector failure = new FailureCollector();

public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor) {
/**
* Creates a new ExchangeSourceHandler.
*
* @param maxBufferSize the maximum size of the exchange buffer. A larger buffer reduces ``pauses`` but uses more memory,
* which could otherwise be allocated for other purposes.
* @param fetchExecutor the executor used to fetch pages.
* @param completionListener a listener that will be notified when the exchange source handler fails or completes
*/
public ExchangeSourceHandler(int maxBufferSize, Executor fetchExecutor, ActionListener<Void> completionListener) {
this.buffer = new ExchangeBuffer(maxBufferSize);
this.fetchExecutor = fetchExecutor;
this.outstandingSinks = new PendingInstances(() -> buffer.finish(false));
this.outstandingSources = new PendingInstances(() -> buffer.finish(true));
buffer.addCompletionListener(ActionListener.running(() -> {
final ActionListener<Void> listener = ActionListener.assertAtLeastOnce(completionListener).delegateFailure((l, unused) -> {
final Exception e = failure.getFailure();
if (e != null) {
l.onFailure(e);
} else {
l.onResponse(null);
}
});
try (RefCountingListener refs = new RefCountingListener(listener)) {
for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) {
// Create an outstanding instance and then finish to complete the completionListener
// if we haven't registered any instances of exchange sinks or exchange sources before.
pending.trackNewInstance();
pending.completion.addListener(refs.acquire());
pending.finishInstance();
}
}
}));
}
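
With the completion listener moved into the constructor (the old addCompletionListener method is removed further down), callers now wire completion handling up front. A minimal construction sketch follows; the buffer size, executor name, and log statements are assumptions for illustration, while the three-argument signature and the ActionListener<Void> type come from the diff above.

// Hedged sketch, not the production wiring.
ExchangeSourceHandler sourceHandler = new ExchangeSourceHandler(
    10,                                     // maxBufferSize: larger buffers pause less but hold more memory
    threadPool.executor("esql_worker"),     // fetchExecutor (the executor name is an assumption)
    ActionListener.wrap(                    // completionListener: notified on failure or completion
        ignored -> logger.debug("exchange source handler completed"),
        e -> logger.warn("exchange source handler failed", e)
    )
);
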

private class ExchangeSourceImpl implements ExchangeSource {
@@ -89,20 +119,6 @@ public int bufferSize() {
}
}

public void addCompletionListener(ActionListener<Void> listener) {
buffer.addCompletionListener(ActionListener.running(() -> {
try (RefCountingListener refs = new RefCountingListener(listener)) {
for (PendingInstances pending : List.of(outstandingSinks, outstandingSources)) {
// Create an outstanding instance and then finish to complete the completionListener
// if we haven't registered any instances of exchange sinks or exchange sources before.
pending.trackNewInstance();
pending.completion.addListener(refs.acquire());
pending.finishInstance();
}
}
}));
}

/**
* Create a new {@link ExchangeSource} for exchanging data
*
@@ -159,10 +175,14 @@ void exited() {
private final class RemoteSinkFetcher {
private volatile boolean finished = false;
private final RemoteSink remoteSink;
private final boolean failFast;
private final ActionListener<Void> completionListener;

RemoteSinkFetcher(RemoteSink remoteSink) {
RemoteSinkFetcher(RemoteSink remoteSink, boolean failFast, ActionListener<Void> completionListener) {
outstandingSinks.trackNewInstance();
this.remoteSink = remoteSink;
this.failFast = failFast;
this.completionListener = completionListener;
}

void fetchPage() {
@@ -198,15 +218,22 @@ void fetchPage() {
}

void onSinkFailed(Exception e) {
failure.unwrapAndCollect(e);
if (failFast) {
failure.unwrapAndCollect(e);
}
buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading
onSinkComplete();
if (finished == false) {
finished = true;
outstandingSinks.finishInstance();
completionListener.onFailure(e);
}
}

void onSinkComplete() {
if (finished == false) {
finished = true;
outstandingSinks.finishInstance();
completionListener.onResponse(null);
}
}
}
@@ -215,23 +242,36 @@ void onSinkComplete() {
* Add a remote sink as a new data source of this handler. The handler will start fetching data from this remote sink immediately.
*
* @param remoteSink the remote sink
* @param instances the number of concurrent ``clients`` that this handler should use to fetch pages. More clients reduce latency,
* but add overhead.
* @param failFast determines how failures in this remote sink are handled:
* - If {@code false}, failures from this remote sink will not cause the exchange source to abort.
* Callers must handle these failures notified via {@code listener}.
* - If {@code true}, failures from this remote sink will cause the exchange source to abort.
* Callers can safely ignore failures notified via this listener, as they are collected and
* reported by the exchange source.
* @param instances the number of concurrent ``clients`` that this handler should use to fetch pages.
* More clients reduce latency, but add overhead.
* @param listener a listener that will be notified when the sink fails or completes
* @see ExchangeSinkHandler#fetchPageAsync(boolean, ActionListener)
*/
public void addRemoteSink(RemoteSink remoteSink, int instances) {
public void addRemoteSink(RemoteSink remoteSink, boolean failFast, int instances, ActionListener<Void> listener) {
final ActionListener<Void> sinkListener = ActionListener.assertAtLeastOnce(ActionListener.notifyOnce(listener));
fetchExecutor.execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
failure.unwrapAndCollect(e);
if (failFast) {
failure.unwrapAndCollect(e);
}
buffer.waitForReading().listener().onResponse(null); // resume the Driver if it is being blocked on reading
sinkListener.onFailure(e);
}

@Override
protected void doRun() {
for (int i = 0; i < instances; i++) {
var fetcher = new RemoteSinkFetcher(remoteSink);
fetcher.fetchPage();
try (RefCountingListener refs = new RefCountingListener(sinkListener)) {
for (int i = 0; i < instances; i++) {
var fetcher = new RemoteSinkFetcher(remoteSink, failFast, refs.acquire());
fetcher.fetchPage();
}
}
}
});
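
Putting the new contract into practice, the sketch below registers two sinks with different failure policies. The handler variables, the instances count, and the partialFailures collector are placeholders; the parameter semantics follow the javadoc above, and ActionListener.noop() is the same helper the updated tests use.

// failFast=true: any failure aborts the whole exchange source, which collects and reports it via
// its completion listener, so the per-sink listener can safely be a no-op.
sourceHandler.addRemoteSink(localSink::fetchPageAsync, true, instances, ActionListener.noop());

// failFast=false: this sink may fail without aborting the query; the caller has to record the
// failure itself through the per-sink listener.
sourceHandler.addRemoteSink(
    flakyRemoteSink,
    false,
    instances,
    ActionListener.wrap(ignored -> {}, e -> partialFailures.add(e))
);
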
@@ -209,8 +209,19 @@ List<Driver> createDriversForInput(List<Page> input, List<Page> results, boolean
randomIntBetween(2, 10),
threadPool.relativeTimeInMillisSupplier()
);
ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(randomIntBetween(1, 4), threadPool.executor(ESQL_TEST_EXECUTOR));
sourceExchanger.addRemoteSink(sinkExchanger::fetchPageAsync, 1);
ExchangeSourceHandler sourceExchanger = new ExchangeSourceHandler(
randomIntBetween(1, 4),
threadPool.executor(ESQL_TEST_EXECUTOR),
ActionListener.noop()
);
sourceExchanger.addRemoteSink(
sinkExchanger::fetchPageAsync,
randomBoolean(),
1,
ActionListener.<Void>noop().delegateResponse((l, e) -> {
throw new AssertionError("unexpected failure", e);
})
);

Iterator<? extends Operator> intermediateOperatorItr;
int itrSize = (splitInput.size() * 3) + 3; // 3 inter ops per initial source drivers, and 3 per final