From ef95cdd4cce3cdb3c788dd6c2de122dcc7f82d4a Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 26 Aug 2024 18:51:12 -0700 Subject: [PATCH 01/46] Fix native library loading zstd with jna (#112221) Recent refactoring of native library paths broke jna loading zstd. This commit fixes jna to set the jna.library.path during init so that jna calls to load libraries still work. --- .../nativeaccess/jna/JnaNativeLibraryProvider.java | 11 +++++++++++ .../elasticsearch/nativeaccess/lib/LoaderHelper.java | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java index 79caf04c97246..e0233187425ea 100644 --- a/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java +++ b/libs/native/jna/src/main/java/org/elasticsearch/nativeaccess/jna/JnaNativeLibraryProvider.java @@ -8,9 +8,11 @@ package org.elasticsearch.nativeaccess.jna; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.nativeaccess.lib.JavaLibrary; import org.elasticsearch.nativeaccess.lib.Kernel32Library; import org.elasticsearch.nativeaccess.lib.LinuxCLibrary; +import org.elasticsearch.nativeaccess.lib.LoaderHelper; import org.elasticsearch.nativeaccess.lib.MacCLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibrary; import org.elasticsearch.nativeaccess.lib.NativeLibraryProvider; @@ -23,6 +25,10 @@ public class JnaNativeLibraryProvider extends NativeLibraryProvider { + static { + setJnaLibraryPath(); + } + public JnaNativeLibraryProvider() { super( "jna", @@ -45,6 +51,11 @@ public JnaNativeLibraryProvider() { ); } + @SuppressForbidden(reason = "jna library path must be set for load library to work with our own libs") + private static void setJnaLibraryPath() { + System.setProperty("jna.library.path", LoaderHelper.platformLibDir.toString()); + } + 
private static Supplier notImplemented() { return () -> { throw new AssertionError(); }; } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java index 4da52c415c040..42ca60b81a027 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/LoaderHelper.java @@ -16,7 +16,7 @@ * A utility for loading libraries from Elasticsearch's platform specific lib dir. */ public class LoaderHelper { - private static final Path platformLibDir = findPlatformLibDir(); + public static final Path platformLibDir = findPlatformLibDir(); private static Path findPlatformLibDir() { // tests don't have an ES install, so the platform dir must be passed in explicitly From 535e9edced9995e8411b46622e29f8ae006ab4f1 Mon Sep 17 00:00:00 2001 From: Quentin Pradet Date: Tue, 27 Aug 2024 06:38:11 +0400 Subject: [PATCH 02/46] Add ingest-geoip module to rest-resources-zip (#112216) --- modules/ingest-geoip/build.gradle | 4 ++++ x-pack/rest-resources-zip/build.gradle | 1 + 2 files changed, 5 insertions(+) diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 5bdb6da5c7b29..bc5bb165cd0d2 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -88,3 +88,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/ingest_geoip/20_geoip_processor.yml", "from 8.0 yaml rest tests use geoip test fixture and default geoip are no longer packaged. 
In 7.x yaml tests used default databases which makes tests results very different, so skipping these tests") // task.skipTest("lang_mustache/50_multi_search_template/Multi-search template with errors", "xxx") } + +artifacts { + restTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) +} diff --git a/x-pack/rest-resources-zip/build.gradle b/x-pack/rest-resources-zip/build.gradle index cc5bddf12d801..0133ff80dfadf 100644 --- a/x-pack/rest-resources-zip/build.gradle +++ b/x-pack/rest-resources-zip/build.gradle @@ -21,6 +21,7 @@ dependencies { freeTests project(path: ':rest-api-spec', configuration: 'restTests') freeTests project(path: ':modules:aggregations', configuration: 'restTests') freeTests project(path: ':modules:analysis-common', configuration: 'restTests') + freeTests project(path: ':modules:ingest-geoip', configuration: 'restTests') compatApis project(path: ':rest-api-spec', configuration: 'restCompatSpecs') compatApis project(path: ':x-pack:plugin', configuration: 'restCompatSpecs') freeCompatTests project(path: ':rest-api-spec', configuration: 'restCompatTests') From d14fe7733b2ce361e08c05624668fddbf2763a86 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 27 Aug 2024 17:03:01 +1000 Subject: [PATCH 03/46] Expand RecordingInstrucments to support collection of observers (#112195) The support is needed for RecordingInstruments to be used in tests for guages with a collection of observers. 
Relates: #110630 --- .../telemetry/RecordingInstruments.java | 29 ++++++++----- .../telemetry/RecordingMeterRegistry.java | 42 +++++++++++-------- 2 files changed, 43 insertions(+), 28 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java index 35417c16e7e1c..49e667bb74e5b 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingInstruments.java @@ -24,6 +24,7 @@ import org.elasticsearch.telemetry.metric.LongUpDownCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; +import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -53,7 +54,7 @@ public String getName() { } } - protected interface NumberWithAttributesObserver extends Supplier>> { + protected interface NumberWithAttributesObserver extends Supplier>>> { } @@ -74,7 +75,7 @@ public void run() { return; } var observation = observer.get(); - call(observation.v1(), observation.v2()); + observation.forEach(o -> call(o.v1(), o.v2())); } } @@ -109,10 +110,10 @@ public void incrementBy(double inc, Map attributes) { } public static class RecordingDoubleGauge extends CallbackRecordingInstrument implements DoubleGauge { - public RecordingDoubleGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingDoubleGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } @@ -172,10 +173,14 @@ public void incrementBy(long inc, Map attributes) { public static class RecordingAsyncLongCounter extends CallbackRecordingInstrument implements 
LongAsyncCounter { - public RecordingAsyncLongCounter(String name, Supplier observer, MetricRecorder recorder) { + public RecordingAsyncLongCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -183,10 +188,14 @@ public RecordingAsyncLongCounter(String name, Supplier obser public static class RecordingAsyncDoubleCounter extends CallbackRecordingInstrument implements DoubleAsyncCounter { - public RecordingAsyncDoubleCounter(String name, Supplier observer, MetricRecorder recorder) { + public RecordingAsyncDoubleCounter( + String name, + Supplier> observer, + MetricRecorder recorder + ) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } @@ -194,10 +203,10 @@ public RecordingAsyncDoubleCounter(String name, Supplier o public static class RecordingLongGauge extends CallbackRecordingInstrument implements LongGauge { - public RecordingLongGauge(String name, Supplier observer, MetricRecorder recorder) { + public RecordingLongGauge(String name, Supplier> observer, MetricRecorder recorder) { super(name, () -> { var observation = observer.get(); - return new Tuple<>(observation.value(), observation.attributes()); + return observation.stream().map(o -> new Tuple<>((Number) o.value(), o.attributes())).toList(); }, recorder); } } diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java index 97fe0ad1370ef..392445aa77a8f 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/RecordingMeterRegistry.java @@ -24,6 +24,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.Collection; +import java.util.Collections; import java.util.function.Supplier; /** @@ -72,9 +73,7 @@ protected DoubleUpDownCounter buildDoubleUpDownCounter(String name, String descr @Override public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { - DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -84,7 +83,9 @@ public DoubleGauge registerDoublesGauge( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleGauge instrument = buildDoubleGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -92,7 +93,12 @@ public DoubleGauge getDoubleGauge(String name) { return (DoubleGauge) recorder.getInstrument(InstrumentType.DOUBLE_GAUGE, name); } - protected DoubleGauge buildDoubleGauge(String name, String description, String unit, Supplier observer) { + protected DoubleGauge buildDoubleGauge( + String name, + String description, + String unit, + Supplier> observer + ) { return new RecordingInstruments.RecordingDoubleGauge(name, observer, recorder); } @@ -121,9 +127,7 @@ public LongCounter registerLongCounter(String name, String description, String u @Override public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { - LongAsyncCounter instrument = new 
RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerLongsAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -133,7 +137,9 @@ public LongAsyncCounter registerLongsAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + LongAsyncCounter instrument = new RecordingInstruments.RecordingAsyncLongCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -148,9 +154,7 @@ public DoubleAsyncCounter registerDoubleAsyncCounter( String unit, Supplier observer ) { - DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); - return instrument; + return registerDoublesAsyncCounter(name, description, unit, () -> Collections.singleton(observer.get())); } @Override @@ -160,7 +164,9 @@ public DoubleAsyncCounter registerDoublesAsyncCounter( String unit, Supplier> observer ) { - throw new UnsupportedOperationException("not implemented"); + DoubleAsyncCounter instrument = new RecordingInstruments.RecordingAsyncDoubleCounter(name, observer, recorder); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -196,14 +202,14 @@ protected LongUpDownCounter buildLongUpDownCounter(String name, String descripti @Override public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { - LongGauge instrument = buildLongGauge(name, description, unit, observer); - recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, 
unit); - return instrument; + return registerLongsGauge(name, description, unit, () -> Collections.singleton(observer.get())); } @Override public LongGauge registerLongsGauge(String name, String description, String unit, Supplier> observer) { - throw new UnsupportedOperationException("not implemented"); + LongGauge instrument = buildLongGauge(name, description, unit, observer); + recorder.register(instrument, InstrumentType.fromInstrument(instrument), name, description, unit); + return instrument; } @Override @@ -211,7 +217,7 @@ public LongGauge getLongGauge(String name) { return (LongGauge) recorder.getInstrument(InstrumentType.LONG_GAUGE, name); } - protected LongGauge buildLongGauge(String name, String description, String unit, Supplier observer) { + protected LongGauge buildLongGauge(String name, String description, String unit, Supplier> observer) { return new RecordingInstruments.RecordingLongGauge(name, observer, recorder); } From 303b2274766595c2bbbd2b339345cfa6b6a2009e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:05:46 +0100 Subject: [PATCH 04/46] Add link to warning re. single-node clusters (#112114) Expands the message added in #88013 to include a link to the relevant docs. 
--- .../cluster/coordination/Coordinator.java | 7 +++++-- .../java/org/elasticsearch/common/ReferenceDocs.java | 1 + .../elasticsearch/common/reference-docs-links.json | 3 ++- .../cluster/coordination/CoordinatorTests.java | 11 ++++++++++- 4 files changed, 18 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 437219b312045..e922d130d7f83 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -41,6 +41,7 @@ import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -831,10 +832,12 @@ public void run() { discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \ not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \ cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \ - the node's data path(s). Remove the discovery configuration to suppress this message.""", + the node's data path(s). Remove the discovery configuration to suppress this message. 
See [{}] for \ + more information.""", applierState.metadata().clusterUUID(), DISCOVERY_SEED_HOSTS_SETTING.getKey(), - DISCOVERY_SEED_HOSTS_SETTING.get(settings) + DISCOVERY_SEED_HOSTS_SETTING.get(settings), + ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS ); } } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index f710ae7c3b84a..59c55fb7b624a 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -81,6 +81,7 @@ public enum ReferenceDocs { MAX_SHARDS_PER_NODE, FLOOD_STAGE_WATERMARK, X_OPAQUE_ID, + FORMING_SINGLE_NODE_CLUSTERS, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index 8288ca792b0f1..3eb8939c22a65 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -41,5 +41,6 @@ "LUCENE_MAX_DOCS_LIMIT": "size-your-shards.html#troubleshooting-max-docs-limit", "MAX_SHARDS_PER_NODE": "size-your-shards.html#troubleshooting-max-shards-open", "FLOOD_STAGE_WATERMARK": "fix-watermark-errors.html", - "X_OPAQUE_ID": "api-conventions.html#x-opaque-id" + "X_OPAQUE_ID": "api-conventions.html#x-opaque-id", + "FORMING_SINGLE_NODE_CLUSTERS": "modules-discovery-bootstrap-cluster.html#modules-discovery-bootstrap-cluster-joining" } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b57badb3a180f..bf64b29d364e0 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterStateUpdateStats; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -79,6 +80,8 @@ import static org.elasticsearch.discovery.SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING; import static org.elasticsearch.monitor.StatusInfo.Status.HEALTHY; import static org.elasticsearch.monitor.StatusInfo.Status.UNHEALTHY; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -1762,7 +1765,13 @@ public void testLogsWarningPeriodicallyIfSingleNodeClusterHasSeedHosts() { @Override public void match(LogEvent event) { final String message = event.getMessage().getFormattedMessage(); - assertThat(message, startsWith("This node is a fully-formed single-node cluster with cluster UUID")); + assertThat( + message, + allOf( + startsWith("This node is a fully-formed single-node cluster with cluster UUID"), + containsString(ReferenceDocs.FORMING_SINGLE_NODE_CLUSTERS.toString()) + ) + ); loggedClusterUuid = (String) event.getMessage().getParameters()[0]; } From ec90d2c1239bf848914dc4411c676a1f05f2777a Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:05 +0100 Subject: [PATCH 05/46] Reduce nesting in restore-snapshot path (#112107) Also cleans up the exception-handling a little to ensure that all failures are logged. 
--- .../snapshots/RestoreService.java | 114 +++++++++--------- 1 file changed, 59 insertions(+), 55 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0f03cfab4ad2e..d8987495f9035 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.RefCountingRunnable; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; @@ -56,7 +57,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; @@ -92,9 +92,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -248,62 +248,66 @@ public void restoreSnapshot( final BiConsumer updater ) { assert Repository.assertSnapshotMetaThread(); - try { - // Try and fill in any missing repository UUIDs in case they're needed during the restore - final var repositoryUuidRefreshStep = new ListenableFuture(); - refreshRepositoryUuids( - refreshRepositoryUuidOnRestore, - repositoriesService, - () -> 
repositoryUuidRefreshStep.onResponse(null), - snapshotMetaExecutor - ); - // Read snapshot info and metadata from the repository - final String repositoryName = request.repository(); - Repository repository = repositoriesService.repository(repositoryName); - final ListenableFuture repositoryDataListener = new ListenableFuture<>(); - repository.getRepositoryData(snapshotMetaExecutor, repositoryDataListener); - - repositoryDataListener.addListener( - listener.delegateFailureAndWrap( - (delegate, repositoryData) -> repositoryUuidRefreshStep.addListener( - delegate.delegateFailureAndWrap((subDelegate, ignored) -> { - assert Repository.assertSnapshotMetaThread(); - final String snapshotName = request.snapshot(); - final Optional matchingSnapshotId = repositoryData.getSnapshotIds() - .stream() - .filter(s -> snapshotName.equals(s.getName())) - .findFirst(); - if (matchingSnapshotId.isPresent() == false) { - throw new SnapshotRestoreException(repositoryName, snapshotName, "snapshot does not exist"); - } + // Try and fill in any missing repository UUIDs in case they're needed during the restore + final var repositoryUuidRefreshStep = SubscribableListener.newForked( + l -> refreshRepositoryUuids(refreshRepositoryUuidOnRestore, repositoriesService, () -> l.onResponse(null), snapshotMetaExecutor) + ); - final SnapshotId snapshotId = matchingSnapshotId.get(); - if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { - throw new SnapshotRestoreException( - repositoryName, - snapshotName, - "snapshot UUID mismatch: expected [" - + request.snapshotUuid() - + "] but got [" - + snapshotId.getUUID() - + "]" - ); - } - repository.getSnapshotInfo( - snapshotId, - subDelegate.delegateFailureAndWrap( - (l, snapshotInfo) -> startRestore(snapshotInfo, repository, request, repositoryData, updater, l) - ) - ); - }) - ) + // AtomicReference just so we have somewhere to hold these objects, there's no interesting concurrency here + final 
AtomicReference repositoryRef = new AtomicReference<>(); + final AtomicReference repositoryDataRef = new AtomicReference<>(); + + SubscribableListener + + .newForked(repositorySetListener -> { + // do this within newForked for exception handling + repositoryRef.set(repositoriesService.repository(request.repository())); + repositorySetListener.onResponse(null); + }) + + .andThen( + repositoryDataListener -> repositoryRef.get().getRepositoryData(snapshotMetaExecutor, repositoryDataListener) + ) + .andThenAccept(repositoryDataRef::set) + .andThen(repositoryUuidRefreshStep::addListener) + + .andThen(snapshotInfoListener -> { + assert Repository.assertSnapshotMetaThread(); + final String snapshotName = request.snapshot(); + final SnapshotId snapshotId = repositoryDataRef.get() + .getSnapshotIds() + .stream() + .filter(s -> snapshotName.equals(s.getName())) + .findFirst() + .orElseThrow(() -> new SnapshotRestoreException(request.repository(), snapshotName, "snapshot does not exist")); + + if (request.snapshotUuid() != null && request.snapshotUuid().equals(snapshotId.getUUID()) == false) { + throw new SnapshotRestoreException( + request.repository(), + snapshotName, + "snapshot UUID mismatch: expected [" + request.snapshotUuid() + "] but got [" + snapshotId.getUUID() + "]" + ); + } + + repositoryRef.get().getSnapshotInfo(snapshotId, snapshotInfoListener); + }) + + .andThen( + (responseListener, snapshotInfo) -> startRestore( + snapshotInfo, + repositoryRef.get(), + request, + repositoryDataRef.get(), + updater, + responseListener ) - ); - } catch (Exception e) { - logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); - listener.onFailure(e); - } + ) + + .addListener(listener.delegateResponse((delegate, e) -> { + logger.warn(() -> "[" + request.repository() + ":" + request.snapshot() + "] failed to restore snapshot", e); + delegate.onFailure(e); + })); } /** From bff45aaa8a2d53d3de44c66a2c692664fa3b3d46 Mon Sep 17 
00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 08:06:20 +0100 Subject: [PATCH 06/46] Reduce `CompletableFuture` usage in tests (#111848) Fixes some spots in tests where we use `CompletableFuture` instead of one of the preferred alternatives. --- .../grok/MatcherWatchdogTests.java | 9 +- .../action/bulk/BulkOperationTests.java | 136 +++++------------- .../ingest/ConditionalProcessorTests.java | 8 +- .../ingest/PipelineProcessorTests.java | 10 +- .../security/authc/ApiKeyServiceTests.java | 16 +-- 5 files changed, 53 insertions(+), 126 deletions(-) diff --git a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java index b66778743aec0..5ed1a7d13b80a 100644 --- a/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java +++ b/libs/grok/src/test/java/org/elasticsearch/grok/MatcherWatchdogTests.java @@ -7,12 +7,12 @@ */ package org.elasticsearch.grok; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.test.ESTestCase; import org.joni.Matcher; import org.mockito.Mockito; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -77,16 +77,17 @@ public void testIdleIfNothingRegistered() throws Exception { ); // Periodic action is not scheduled because no thread is registered verifyNoMoreInteractions(threadPool); - CompletableFuture commandFuture = new CompletableFuture<>(); + + PlainActionFuture commandFuture = new PlainActionFuture<>(); // Periodic action is scheduled because a thread is registered doAnswer(invocationOnMock -> { - commandFuture.complete((Runnable) invocationOnMock.getArguments()[0]); + commandFuture.onResponse(invocationOnMock.getArgument(0)); return null; }).when(threadPool).schedule(any(Runnable.class), eq(interval), eq(TimeUnit.MILLISECONDS)); 
Matcher matcher = mock(Matcher.class); watchdog.register(matcher); // Registering the first thread should have caused the command to get scheduled again - Runnable command = commandFuture.get(1L, TimeUnit.MILLISECONDS); + Runnable command = safeGet(commandFuture); Mockito.reset(threadPool); watchdog.unregister(matcher); command.run(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java index e950901a538b4..0c0e1de74a3e7 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkOperationTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.ClusterName; @@ -60,9 +61,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -201,9 +200,6 @@ public void tearDownThreadpool() { public void testClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Not retryable ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(Metadata.CLUSTER_READ_ONLY_BLOCK).build()) @@ -215,9 +211,10 @@ public void 
testClusterBlockedFailsBulk() { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); } /** @@ -226,9 +223,6 @@ public void testClusterBlockedFailsBulk() { public void testTimeoutOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -248,9 +242,11 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(ClusterBlockException.class) + ); - expectThrows(ExecutionException.class, ClusterBlockException.class, future::get); verify(observer, times(2)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -261,9 +257,6 @@ public void testTimeoutOnRetryableClusterBlockedFailsBulk() { public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { NodeClient client = getNodeClient(assertNoClientInteraction()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, 
future::completeExceptionally); - // Retryable final ClusterState state = ClusterState.builder(DEFAULT_STATE) .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES).build()) @@ -278,9 +271,10 @@ public void testNodeClosedOnRetryableClusterBlockedFailsBulk() { return null; }).doThrow(new AssertionError("Should not wait")).when(observer).waitForNextChange(any()); - newBulkOperation(client, new BulkRequest(), state, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, new BulkRequest(), state, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); } @@ -296,12 +290,7 @@ public void testBulkToIndex() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -318,12 +307,7 @@ public void testBulkToIndexFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(indexMetadata.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); 
assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -344,12 +328,7 @@ public void testBulkToDataStream() throws Exception { NodeClient client = getNodeClient(acceptAllShardWrites()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); } @@ -366,12 +345,7 @@ public void testBulkToDataStreamFailingEntireShard() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds1BackingIndex2.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -396,12 +370,7 @@ public void testFailingEntireShardRedirectsToFailureStore() throws Exception { shardSpecificResponse(Map.of(new ShardId(ds2BackingIndex1.getIndex(), 0), failWithException(() -> new MapperException("test")))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse 
bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -426,12 +395,7 @@ public void testFailingDocumentRedirectsToFailureStore() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("test"))) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -465,12 +429,7 @@ public void testFailureStoreShardFailureRejectsDocument() throws Exception { ) ); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -500,16 +459,12 @@ public void testFailedDocumentCanNotBeConvertedFails() throws Exception { thatFailsDocuments(Map.of(new IndexAndId(ds2BackingIndex1.getIndex().getName(), "3"), () -> new MapperException("root cause"))) ); - CompletableFuture future = new 
CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - // Mock a failure store document converter that always fails FailureStoreDocumentConverter mockConverter = mock(FailureStoreDocumentConverter.class); when(mockConverter.transformFailedRequest(any(), any(), any(), any())).thenThrow(new IOException("Could not serialize json")); - newBulkOperation(client, bulkRequest, mockConverter, listener).run(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, mockConverter, l).run()); - BulkResponse bulkItemResponses = future.get(); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -579,13 +534,10 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { return null; }).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.notifyOnce( - ActionListener.wrap(future::complete, future::completeExceptionally) + final SubscribableListener responseListener = SubscribableListener.newForked( + l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run() ); - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - // The operation will attempt to write the documents in the request, receive a failure, wait for a stable cluster state, and then // redirect the failed documents to the failure store. 
Wait for that failure store write to start: if (readyToPerformFailureStoreWrite.await(30, TimeUnit.SECONDS) == false) { @@ -595,7 +547,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { } // Check to make sure there is no response yet - if (future.isDone()) { + if (responseListener.isDone()) { // we're going to fail the test, but be a good citizen and unblock the other thread first beginFailureStoreWrite.countDown(); fail("bulk operation completed prematurely"); @@ -605,7 +557,7 @@ public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception { beginFailureStoreWrite.countDown(); // Await final result and verify - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(responseListener); assertThat(bulkItemResponses.hasFailures(), is(false)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds2FailureStore1.getIndex().getName())) @@ -650,12 +602,7 @@ public void testBlockedClusterRejectsFailureStoreDocument() throws Exception { when(observer.isTimedOut()).thenReturn(false); doThrow(new AssertionError("Should not wait on non retryable block")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -715,12 +662,7 @@ public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception { return null; }).doThrow(new AssertionError("Should not wait any 
longer")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); assertThat(bulkItemResponses.hasFailures(), is(true)); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) @@ -775,12 +717,10 @@ public void testNodeClosureRejectsFailureStoreDocument() { return null; }).doThrow(new AssertionError("Should not wait any longer")).when(observer).waitForNextChange(any()); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - expectThrows(ExecutionException.class, NodeClosedException.class, future::get); + assertThat( + safeAwaitFailure(BulkResponse.class, l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()), + instanceOf(NodeClosedException.class) + ); verify(observer, times(1)).isTimedOut(); verify(observer, times(1)).waitForNextChange(any()); @@ -832,12 +772,7 @@ public void testLazilyRollingOverFailureStore() throws Exception { ClusterState rolledOverState = ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse 
bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(item -> item.getIndex().equals(ds3FailureStore2.getIndex().getName())) .findFirst() @@ -880,12 +815,7 @@ public void testFailureWhileRollingOverFailureStore() throws Exception { ClusterState rolledOverState = ClusterState.builder(DEFAULT_STATE).metadata(metadata).build(); ClusterStateObserver observer = mockObserver(DEFAULT_STATE, DEFAULT_STATE, rolledOverState); - CompletableFuture future = new CompletableFuture<>(); - ActionListener listener = ActionListener.wrap(future::complete, future::completeExceptionally); - - newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, listener).run(); - - BulkResponse bulkItemResponses = future.get(); + BulkResponse bulkItemResponses = safeAwait(l -> newBulkOperation(client, bulkRequest, DEFAULT_STATE, observer, l).run()); BulkItemResponse failedItem = Arrays.stream(bulkItemResponses.getItems()) .filter(BulkItemResponse::isFailed) .findFirst() diff --git a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java index 3a6de10b5901d..546b252615b28 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ConditionalProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.IngestConditionalScript; import org.elasticsearch.script.MockScriptEngine; @@ -25,7 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.function.BiConsumer; @@ -242,14 +242,14 @@ public boolean execute(Map ctx) { private static void assertMutatingCtxThrows(Consumer> mutation) throws Exception { String scriptName = "conditionalScript"; - CompletableFuture expectedException = new CompletableFuture<>(); + PlainActionFuture expectedException = new PlainActionFuture<>(); ScriptService scriptService = new ScriptService( Settings.builder().build(), Map.of(Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Map.of(scriptName, ctx -> { try { mutation.accept(ctx); } catch (Exception e) { - expectedException.complete(e); + expectedException.onResponse(e); } return false; }), Map.of())), @@ -267,7 +267,7 @@ private static void assertMutatingCtxThrows(Consumer> mutati IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); ingestDocument.setFieldValue("listField", new ArrayList<>()); execProcessor(processor, ingestDocument, (result, e) -> {}); - Exception e = expectedException.get(); + Exception e = safeGet(expectedException); assertThat(e, instanceOf(UnsupportedOperationException.class)); assertEquals("Mutating ingest documents in conditionals is not supported", e.getMessage()); assertStats(processor, 0, 0, 0); diff --git a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java index cfbdbc3792082..d9058e83acfe0 100644 --- a/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/PipelineProcessorTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; import org.elasticsearch.test.ESTestCase; @@ -16,7 +17,6 @@ import java.util.HashMap; import java.util.List; import 
java.util.Map; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; import java.util.function.LongSupplier; @@ -32,12 +32,12 @@ public class PipelineProcessorTests extends ESTestCase { public void testExecutesPipeline() throws Exception { String pipelineId = "pipeline"; IngestService ingestService = createIngestService(); - CompletableFuture invoked = new CompletableFuture<>(); + PlainActionFuture invoked = new PlainActionFuture<>(); IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); Pipeline pipeline = new Pipeline(pipelineId, null, null, null, new CompoundProcessor(new Processor() { @Override - public IngestDocument execute(final IngestDocument ingestDocument) throws Exception { - invoked.complete(ingestDocument); + public IngestDocument execute(final IngestDocument ingestDocument) { + invoked.onResponse(ingestDocument); return ingestDocument; } @@ -61,7 +61,7 @@ public String getDescription() { Map config = new HashMap<>(); config.put("name", pipelineId); factory.create(Map.of(), null, null, config).execute(testIngestDocument, (result, e) -> {}); - assertIngestDocument(testIngestDocument, invoked.get()); + assertIngestDocument(testIngestDocument, safeGet(invoked)); } public void testThrowsOnMissingPipeline() throws Exception { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index f4d75434b92de..fa6eb307933ec 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -145,7 +145,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.CompletableFuture; import java.util.concurrent.CyclicBarrier; import 
java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -3442,15 +3441,12 @@ public static Authentication createApiKeyAuthentication( Authentication.newApiKeyAuthentication(authenticationResult, "node01"), threadContext ); - final CompletableFuture authFuture = new CompletableFuture<>(); - securityContext.executeAfterRewritingAuthentication((c) -> { - try { - authFuture.complete(authenticationContextSerializer.readFromContext(threadContext)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }, version); - return authFuture.get(); + return safeAwait( + l -> securityContext.executeAfterRewritingAuthentication( + c -> ActionListener.completeWith(l, () -> authenticationContextSerializer.readFromContext(threadContext)), + version + ) + ); } public static Authentication createApiKeyAuthentication(ApiKeyService apiKeyService, Authentication authentication) From 6d886bc48d71076d37a07faceb1e421b95ec48fd Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Tue, 27 Aug 2024 09:20:59 +0200 Subject: [PATCH 07/46] Add dataset for full text search testing (#112105) --- .../xpack/esql/CsvTestsDataLoader.java | 4 +- .../testFixtures/src/main/resources/books.csv | 80 +++++++++++++++++ .../src/main/resources/mapping-books.json | 30 +++++++ .../main/resources/match-operator.csv-spec | 90 ++++++++++++------- .../src/main/resources/match.csv-spec | 53 ++++++----- 5 files changed, 203 insertions(+), 54 deletions(-) create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index d5e70d264c9be..b20e3bb0d5409 100644 --- 
a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -100,6 +100,7 @@ public class CsvTestsDataLoader { private static final TestsDataset DISTANCES = new TestsDataset("distances", "mapping-distances.json", "distances.csv"); private static final TestsDataset K8S = new TestsDataset("k8s", "k8s-mappings.json", "k8s.csv", "k8s-settings.json", true); private static final TestsDataset ADDRESSES = new TestsDataset("addresses", "mapping-addresses.json", "addresses.csv", null, true); + private static final TestsDataset BOOKS = new TestsDataset("books", "mapping-books.json", "books.csv", null, true); public static final Map CSV_DATASET_MAP = Map.ofEntries( Map.entry(EMPLOYEES.indexName, EMPLOYEES), @@ -126,7 +127,8 @@ public class CsvTestsDataLoader { Map.entry(DATE_NANOS.indexName, DATE_NANOS), Map.entry(K8S.indexName, K8S), Map.entry(DISTANCES.indexName, DISTANCES), - Map.entry(ADDRESSES.indexName, ADDRESSES) + Map.entry(ADDRESSES.indexName, ADDRESSES), + Map.entry(BOOKS.indexName, BOOKS) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv new file mode 100644 index 0000000000000..1deefaa3c6475 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/books.csv @@ -0,0 +1,80 @@ +book_no:keyword,title:text,author:text,year:integer,publisher:text,ratings:float,description:text +2924,A Gentle Creature and Other Stories: White Nights\, A Gentle Creature\, and The Dream of a Ridiculous Man (The World's Classics),[Fyodor Dostoevsky, Alan Myers, W. J. 
Leatherbarrow],2009,Oxford Paperbacks,4.00,In these stories Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. +7670,A Middle English Reader and Vocabulary,[Kenneth Sisam, J. R. R. Tolkien],2011,Courier Corporation,4.33,This highly respected anthology of medieval English literature features poetry\, prose and popular tales from Arthurian legend and classical mythology. Includes notes on each extract\, appendices\, and an extensive glossary by J. R. R. Tolkien. +7381,A Psychic in the Heartland: The Extraordinary Experiences of a Small Town Doctor,Bettilu Stein Faulkner,2003,Red Wheel/Weiser,4.50,The true story of a small-town doctor destined to live his life along two paths: one as a successful physician\, the other as a psychic with ever more interesting adventures. Experiencing a wide range of spiritual phenomena\, Dr. Riblet Hout learned about the connection between the healer and the healed\, our individual missions on earth\, free will\, and our relationship with God. He also paints a vivid picture of life on the other side as well as the moment of transition from physical life to afterlife. +2883,A Summer of Faulkner: As I Lay Dying/The Sound and the Fury/Light in August (Oprah's Book Club),William Faulkner,2005,Vintage Books,3.89,Presents three novels\, including As I Lay Dying\, in which the Bundren family journeys across Mississippi to bury their mother\, The Sound and the Fury\, in which Caddy Compson's story is narrated by her three brothers\, and Light in August\, in which th +4023,A Tolkien Compass: Including J. R. R. 
Tolkien's Guide to the Names in The Lord of the Rings,[Walter Scheps, Agnes Perkins, Charles Adolph Huttar, John Ronald Reuel Tolkien],1975,Open Court Publishing,4.67,The structure\, content\, and character of Tolkien's The Hobbit and The Lord of the Rings are dealt with in ten critical essays. +2382,A Wizard of Earthsea (Earthsea Trilogy Ser.),Ursula K. Le Guin,1991,Atheneum Books for Young Readers,4.01,A boy grows to manhood while attempting to subdue the evil he unleashed on the world as an apprentice to the Master Wizard. +7541,A Writer's Diary (Volume 1: 1873-1876),Fyodor Dostoevsky,1997,Northwestern University Press,4.50,Winner of the AATSEEL Outstanding Translation Award This is the first paperback edition of the complete collection of writings that has been called Dostoevsky's boldest experiment with literary form\, it is a uniquely encyclopedic forum of fictional and nonfictional genres. The Diary's radical format was matched by the extreme range of its contents. In a single frame it incorporated an astonishing variety of material: short stories\, humorous sketches\, reports on sensational crimes\, historical predictions\, portraits of famous people\, autobiographical pieces\, and plans for stories\, some of which were never written while others appeared in the Diary itself. +7400,Anna Karenina: Television Tie-In Edition (Signet classics),[Leo Tolstoy, SBP Editors],2019,Samaira Book Publishers,4.45,The Russian novelist and moral philosopher Leo Tolstoy (1828-1910) ranks as one of the world s great writers\, and his 'War and Peace' has been called the greatest novel ever written. But during his long lifetime\, Tolstoy also wrote enough shorter works to fill many volumes. The message in all his stories is presented with such humour that the reader hardly realises that it is strongly didactic. These stories give a snapshot of Russia and its people in the late nineteenth century. 
+4917,Autumn of the Patriarch,Gabriel Garcia Marquez,2014,Penguin UK,4.33,Gabriel Garcia Marquez\, winner of the 1982 Nobel Prize for Literature and author of One Hundred Years of Solitude\, explores the loneliness of power in Autumn of the Patriarch. 'Over the weekend the vultures got into the presidential palace by pecking through the screens on the balcony windows and the flapping of their wings stirred up the stagnant time inside' As the citizens of an unnamed Caribbean nation creep through dusty corridors in search of their tyrannical leader\, they cannot comprehend that the frail and withered man lying dead on the floor can be the self-styled General of the Universe. Their arrogant\, manically violent leader\, known for serving up traitors to dinner guests and drowning young children at sea\, can surely not die the humiliating death of a mere mortal? Tracing the demands of a man whose egocentric excesses mask the loneliness of isolation and whose lies have become so ingrained that they are indistinguishable from truth\, Marquez has created a fantastical portrait of despotism that rings with an air of reality. 'Delights with its quirky humanity and black humour and impresses by its total originality' Vogue 'Captures perfectly the moral squalor and political paralysis that enshrouds a society awaiting the death of a long-term dictator' Guardian 'Marquez writes in this lyrical\, magical language that no-one else can do' Salman Rushdie +9896,Barn burning (A tale blazer book),William Faulkner,1979,Perfection Learning,3.50,Reprinted from Collected Stories of William Faulkner\, by permission of Random House\, Inc. +9607,Beowolf: The monsters and the critics,John Ronald Reuel Tolkien,1997,HarperCollins UK,4.12,A collection of seven essays by J.R.R. Tolkien arising out of Tolkien's work in medieval literature +1985,Brothers Karamazov,Fyodor Dostoevsky,2015,First Avenue Editions,5.00,Four brothers reunite in their hometown in Russia. 
The murder of their father forces the brothers to question their beliefs about each other\, religion\, and morality. +2713,Collected Stories of William Faulkner,William Faulkner,1995,Vintage,4.53,A collection of short stories focuses on the people of rural Mississippi +2464,Conversations with Kurt Vonnegut (Literary Conversations),Kurt Vonnegut,1988,Univ. Press of Mississippi,4.40,Gathers interviews with Vonnegut from each period of his career and offers a brief profile of his life and accomplishments +8534,Crime and Punishment (Oxford World's Classics),Fyodor Dostoevsky,2017,Oxford University Press,4.38,'One death\, in exchange for thousands of lives - it's simple arithmetic!' A new translation of Dostoevsky's epic masterpiece\, Crime and Punishment (1866). The impoverished student Raskolnikov decides to free himself from debt by killing an old moneylender\, an act he sees as elevating himself above conventional morality. Like Napoleon he will assert his will and his crime will be justified by its elimination of 'vermin' for the sake of the greater good. But Raskolnikov is torn apart by fear\, guilt\, and a growing conscience under the influence of his love for Sonya. Meanwhile the police detective Porfiry is on his trial. It is a powerfully psychological novel\, in which the St Petersburg setting\, Dostoevsky's own circumstances\, and contemporary social problems all play their part. +8605,Dead Souls,Nikolai Gogol,1997,Vintage,4.28,Chichikov\, an amusing and often confused schemer\, buys deceased serfs' names from landholders' poll tax lists hoping to mortgage them for profit +6970,Domestic Goddesses,Edith Vonnegut,1998,Pomegranate,4.67,In this immensely charming and insightful book\, artist Edith Vonnegut takes issue with traditional art imagery in which women are shown as weak and helpless. 
Through twenty-seven of her own paintings interspersed with her text\, she poignantly -- and humorously -- illustrates her maxim that the lives of mothers and homemakers are filled with endless challenges and vital decisions that should be portrayed with the dignity they deserve. In Vonnegut's paintings\, one woman bravely blocks the sun from harming a child (Sun Block) while another vacuums the stairs with angelic figures singing her praises (Electrolux). In contrasting her own Domestic Goddesses with the diaphanous women of classical art (seven paintings by masters such as Titian and Botticelli are included)\, she 'expresses the importance of traditional roles of women so cleverly and with such joy that her message and images will be forever emblazoned on our collective psyche. +4814,El Coronel No Tiene Quien Le Escriba / No One Writes to the Colonel (Spanish Edition),Gabriel Garcia Marquez,2005,Harper Collins,4.45,Written with compassionate realism and wit\, the stories in this mesmerizing collection depict the disparities of town and village life in South America\, of the frightfully poor and outrageously rich\, of memories and illusions\, and of lost opportunities and present joys. +4636,FINAL WITNESS,Simon Tolkien,2004,Random House Digital\, Inc.,3.94,The murder of Lady Anne Robinson by two intruders causes a schism in the victim's family when her son convinces police that his father's beautiful personal assistant hired the killers\, while his father\, the British minister of defense\, refuses to believe his son and marries the accused. A first novel. Reprint. +2936,Fellowship of the Ring 2ND Edition,John Ronald Reuel Tolkien,2008,HarperCollins UK,4.43,Sauron\, the Dark Lord\, has gathered to him all the Rings of Power - the means by which he intends to rule Middle-earth. All he lacks in his plans for dominion is the One Ring - the ring that rules them all - which has fallen into the hands of the hobbit\, Bilbo Baggins. 
In a sleepy village in the Shire\, young Frodo Baggins finds himself faced with an immense task\, as his elderly cousin Bilbo entrusts the Ring to his care. Frodo must leave his home and make a perilous journey across Middle-earth to the Cracks of Doom\, there to destroy the Ring and foil the Dark Lord in his evil purpose. JRR Tolkien's great work of imaginative fiction has been labelled both a heroic romance and a classic fantasy fiction. By turns comic and homely\, epic and diabolic\, the narrative moves through countless changes of scene and character in an imaginary world which is totally convincing in its detail. +8956,GOD BLESS YOU MR. ROSEWATER : Or Pearls Before Swine,Kurt Vonnegut,1970,New York : Dell,4.00,A lawyer schemes to gain control of a large fortune by having the present claimant declared insane. +6818,Hadji Murad,Leo Tolstoy,2022,Hachette UK,3.88,'How truth thickens and deepens when it migrates from didactic fable to the raw experience of a visceral awakening is one of the thrills of Tolstoy's stories' Sharon Cameron in her preface to Hadji Murad and Other Stories This\, the third volume of Tolstoy's shorter fiction concentrates on his later stories\, including one of his greatest\, 'Hadji Murad'. In the stark form of homily that shapes these later works\, life considered as one's own has no rational meaning. From the chain of events that follows in the wake of two schoolboys' deception in 'The Forged Coupon' to the disillusionment of the narrator in 'After the Ball' we see\, in Virginia Woolf's observation\, that Tolstoy puts at the centre of his writing one 'who gathers into himself all experience\, turns the world round between his fingers\, and never ceases to ask\, even as he enjoys it\, what is the meaning of it'. The riverrun edition reissues the translation of Louise and Aylmer Maude\, whose influential versions of Tolstoy first brought his work to a wide readership in English. 
+3950,Hocus,Kurt Vonnegut,1997,Penguin,4.67,Tarkington College\, a small\, exclusive college in upstate New York\, is turned upside down when ten thousand prisoners from the maximum security prison across Lake Mohiga break out and head for the college +5404,Intruder in the dust,William Faulkner,2011,Vintage,3.18,A classic Faulkner novel which explores the lives of a family of characters in the South. An aging black who has long refused to adopt the black's traditionally servile attitude is wrongfully accused of murdering a white man. +5578,Intruder in the dust: A novel,William Faulkner,1991,Vintage,3.18,Dramatizes the events that surround the murder of a white man in a volatile Southern community +6380,La hojarasca (Spanish Edition),Gabriel Garcia Marquez,1979,Harper Collins,3.75,Translated from the Spanish by Gregory Rabassa +5335,Letters of J R R Tolkien,J.R.R. Tolkien,2014,HarperCollins,4.70,This collection will entertain all who appreciate the art of masterful letter writing. The Letters of J.R.R Tolkien sheds much light on Tolkien's creative genius and grand design for the creation of a whole new world: Middle-earth. Featuring a radically expanded index\, this volume provides a valuable research tool for all fans wishing to trace the evolution of THE HOBBIT and THE LORD OF THE RINGS. +3870,My First 100 Words in Spanish/English (My First 100 Words Pull-Tab Book),Keith Faulkner,1998,Libros Para Ninos,4.50,Learning a foreign language has never been this much fun! Just pull the sturdy tabs and change the words under the pictures from English to Spanish and back again to English! +4502,O'Brian's Bride,Colleen Faulkner,1995,Zebra Books,5.00,Abandoning her pampered English life to marry a man in the American colonies\, Elizabeth finds her new world shattered when her husband is killed in an accident\, leaving her in charge of a business on the untamed frontier. Original. +7635,Oliphaunt (Beastly Verse),J. R. R. 
Tolkien,1989,Contemporary Books,2.50,A poem in which an elephant describes himself and his way of life. On board pages. +3254,Pearl and Sir Orfeo,[John Ronald Reuel Tolkien, Christopher Tolkien],1995,Harpercollins Pub Limited,5.00,Three epic poems from 14th century England speak of life during the age of chivalry. Translated from medieval English. +3677,Planet of Exile,Ursula K. Le Guin,1979,Orion,4.20,PLAYAWAY: An alliance between the powerful Tevars and the brown-skinned\, clairvoyant Farbons must take place if the two colonies are to withstand the fierce attack of the nomadic tribes from the north of the planet Eltanin. +4289,Poems from the Hobbit,J R R Tolkien,1999,HarperCollins Publishers,4.00,A collection of J.R.R. Tolkien's Hobbit poems in a miniature hardback volume complete with illustrations by Tolkien himself. Far over misty mountains cold To dungeons deep and caverns old We must away ere break of day To seek the pale enchanted gold. J.R.R. Tolkien's acclaimed The Hobbit contains 12 poems which are themselves masterpieces of writing. This miniature book\, illustrated with 30 of Tolkien's own paintings and drawings from the book -- some quite rare and all in full colour -- includes all the poems\, plus Gollum's eight riddles in verse\, and will be a perfect keepsake for lovers of The Hobbit and of accomplished poetry. +6151,Pop! Went Another Balloon: A Magical Counting Storybook (Magical Counting Storybooks),[Keith Faulkner, Rory Tyger],2003,Dutton Childrens Books,5.00,Toby the turtle goes from in-line skates to a motorcycle to a rocketship with a handful of balloons that pop\, one by one\, along the way. +3535,Rainbow's End: A Magical Story and Moneybox,[Keith Faulkner, Beverlie Manson],2003,Barrons Juveniles,4.00,In this combination picture storybook and coin bank\, the unusual front cover shows an illustration from the story that's embellished with five transparent plastic windows. 
Opening the book\, children will find a story about a poor little ballerina who is crying because her dancing shoes are worn and she has no money to replace them. Full color. Consumable. +8423,Raising Faithful Kids in a Fast-Paced World,Paul Faulkner,1995,Howard Publishing Company,5.00,To find help for struggling parents\, Dr. Paul Faulkner--renowned family counselor and popular speaker--interviewed 30 successful families who have managed to raise faithful kids while also maintaining demanding careers. The invaluable strategies and methods he gleaned are now available in this powerful book delivered in Dr. Faulkner's warm\, humorous style. +1463,Realms of Tolkien: Images of Middle-earth,J. R. R. Tolkien,1997,HarperCollins Publishers,4.00,Twenty new and familiar Tolkien artists are represented in this fabulous volume\, breathing an extraordinary variety of life into 58 different scenes\, each of which is accompanied by appropriate passage from The Hobbit and The Lord of the Rings and The Silmarillion +6323,Resurrection (The Penguin classics),Leo Tolstoy,2009,Penguin,3.25,Leo Tolstoy's last completed novel\, Resurrection is an intimate\, psychological tale of guilt\, anger and forgiveness Serving on the jury at a murder trial\, Prince Dmitri Nekhlyudov is devastated when he sees the prisoner - Katyusha\, a young maid he seduced and abandoned years before. As Dmitri faces the consequences of his actions\, he decides to give up his life of wealth and luxury to devote himself to rescuing Katyusha\, even if it means following her into exile in Siberia. But can a man truly find redemption by saving another person? Tolstoy's most controversial novel\, Resurrection (1899) is a scathing indictment of injustice\, corruption and hypocrisy at all levels of society. Creating a vast panorama of Russian life\, from peasants to aristocrats\, bureaucrats to convicts\, it reveals Tolstoy's magnificent storytelling powers. 
Anthony Briggs' superb new translation preserves Tolstoy's gripping realism and satirical humour. In his introduction\, Briggs discusses the true story behind Resurrection\, Tolstoy's political and religious reasons for writing the novel\, his gift for characterization and the compelling psychological portrait of Dmitri. This edition also includes a chronology\, notes and a summary of chapters. For more than seventy years\, Penguin has been the leading publisher of classic literature in the English-speaking world. With more than 1\,700 titles\, Penguin Classics represents a global bookshelf of the best works throughout history and across genres and disciplines. Readers trust the series to provide authoritative texts enhanced by introductions and notes by distinguished scholars and contemporary authors\, as well as up-to-date translations by award-winning translators. +2714,Return of the King Being the Third Part of The Lord of the Rings,J. R. R. Tolkien,2012,HarperCollins,4.60,Concluding the story begun in The Hobbit\, this is the final part of Tolkien s epic masterpiece\, The Lord of the Rings\, featuring an exclusive cover image from the film\, the definitive text\, and a detailed map of Middle-earth. The armies of the Dark Lord Sauron are massing as his evil shadow spreads ever wider. Men\, Dwarves\, Elves and Ents unite forces to do battle agains the Dark. Meanwhile\, Frodo and Sam struggle further into Mordor in their heroic quest to destroy the One Ring. The devastating conclusion of J.R.R. Tolkien s classic tale of magic and adventure\, begun in The Fellowship of the Ring and The Two Towers\, features the definitive edition of the text and includes the Appendices and a revised Index in full. 
To celebrate the release of the first of Peter Jackson s two-part film adaptation of The Hobbit\, THE HOBBIT: AN UNEXPECTED JOURNEY\, this third part of The Lord of the Rings is available for a limited time with an exclusive cover image from Peter Jackson s award-winning trilogy. +7350,Return of the Shadow,[John Ronald Reuel Tolkien, Christopher Tolkien],2000,Mariner Books,5.00,In this sixth volume of The History of Middle-earth the story reaches The Lord of the Rings. In The Return of the Shadow (an abandoned title for the first volume) Christopher Tolkien describes\, with full citation of the earliest notes\, outline plans\, and narrative drafts\, the intricate evolution of The Fellowship of the Ring and the gradual emergence of the conceptions that transformed what J.R.R. Tolkien for long believed would be a far shorter book\, 'a sequel to The Hobbit'. The enlargement of Bilbo's 'magic ring' into the supremely potent and dangerous Ruling Ring of the Dark Lord is traced and the precise moment is seen when\, in an astonishing and unforeseen leap in the earliest narrative\, a Black Rider first rode into the Shire\, his significance still unknown. The character of the hobbit called Trotter (afterwards Strider or Aragorn) is developed while his indentity remains an absolute puzzle\, and the suspicion only very slowly becomes certainty that he must after all be a Man. The hobbits\, Frodo's companions\, undergo intricate permutations of name and personality\, and other major figures appear in strange modes: a sinister Treebeard\, in league with the Enemy\, a ferocious and malevolent Farmer Maggot. The story in this book ends at the point where J.R.R. Tolkien halted in the story for a long time\, as the Company of the Ring\, still lacking Legolas and Gimli\, stood before the tomb of Balin in the Mines of Moria. The Return of the Shadow is illustrated with reproductions of the first maps and notable pages from the earliest manuscripts. +6760,Roverandom,J. R. R. 
Tolkien,1999,Mariner Books,4.38,Rover\, a dog who has been turned into a toy dog encounters rival wizards and experiences various adventures on the moon with giant spiders\, dragon moths\, and the Great White Dragon. By the author of The Hobbit. Reprint. +8873,Searoad: Chronicles of Klatsand,Ursula K. Le Guin,2004,Shambhala Publications,5.00,A series of interlinking tales and a novella by the author of the Earthsea trilogy portrays the triumphs and struggles of several generations of women who independently control Klatsand\, a small resort town on the Oregon coast. Reprint. +2378,Selected Letters of Lucretia Coffin Mott (Women in American History),[Lucretia Mott, Holly Byers Ochoa, Carol Faulkner],2002,University of Illinois Press,5.00,Dedicated to reform of almost every kind - temperance\, peace\, equal rights\, woman suffrage\, nonresistance\, and the abolition of slavery - Mott viewed women's rights as only one element of a broad-based reform agenda for American society. +1502,Selected Passages from Correspondence with Friends,Nikolai Vasilevich Gogol,2009,Vanderbilt University Press,4.00,Nikolai Gogol wrote some letters to his friends\, none of which were a nose of high rank. Many are reproduced here (the letters\, not noses). +5996,Smith of Wooten Manor & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,4.91,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2301,Smith of Wootton Major & Farmer Giles of Ham,John Ronald Reuel Tolkien,1969,Del Rey,5.00,Two bewitching fantasies by J.R.R. Tolkien\, beloved author of THE HOBBIT. 
In SMITH OF WOOTTON MAJOR\, Tolkien explores the gift of fantasy\, and what it means to the life and character of the man who receives it. And FARMER GILES OF HAM tells a delightfully ribald mock-heroic tale\, where a dragon who invades a town refuses to fight\, and a farmer is chosen to slay him. +2236,Steering the Craft,Ursula K. Le Guin,2015,Houghton Mifflin Harcourt,4.73,A revised and updated guide to the essentials of a writer's craft\, presented by a brilliant practitioner of the art Completely revised and rewritten to address the challenges and opportunities of the modern era\, this handbook is a short\, deceptively simple guide to the craft of writing. Le Guin lays out ten chapters that address the most fundamental components of narrative\, from the sound of language to sentence construction to point of view. Each chapter combines illustrative examples from the global canon with Le Guin's own witty commentary and an exercise that the writer can do solo or in a group. She also offers a comprehensive guide to working in writing groups\, both actual and online. Masterly and concise\, Steering the Craft deserves a place on every writer's shelf. +4724,THE UNVANQUISHED,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +5948,That We Are Gentle Creatures,Fyodor Dostoevsky,2009,OUP Oxford,4.33,In the stories in this volume Dostoevsky explores both the figure of the dreamer divorced from reality and also his own ambiguous attitude to utopianism\, themes central to many of his great novels. In White Nights the apparent idyll of the dreamer's romantic fantasies disguises profound loneliness and estrangement from 'living life'. 
Despite his sentimental friendship with Nastenka\, his final withdrawal into the world of the imagination anticipates the retreat into the 'underground' of many of Dostoevsky's later intellectual heroes. A Gentle Creature and The Dream of a Ridiculous Man show how such withdrawal from reality can end in spiritual desolation and moral indifference and how\, in Dostoevsky's view\, the tragedy of the alienated individual can be resolved only by the rediscovery of a sense of compassion and responsibility towards fellow human beings. This new translation captures the power and lyricism of Dostoevsky's writing\, while the introduction examines the stories in relation to one another and to his novels. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +1937,The Best Short Stories of Dostoevsky (Modern Library),Fyodor Dostoevsky,2012,Modern Library,4.33,This collection\, unique to the Modern Library\, gathers seven of Dostoevsky's key works and shows him to be equally adept at the short story as with the novel. Exploring many of the same themes as in his longer works\, these small masterpieces move from the tender and romantic White Nights\, an archetypal nineteenth-century morality tale of pathos and loss\, to the famous Notes from the Underground\, a story of guilt\, ineffectiveness\, and uncompromising cynicism\, and the first major work of existential literature. Among Dostoevsky's prototypical characters is Yemelyan in The Honest Thief\, whose tragedy turns on an inability to resist crime. 
Presented in chronological order\, in David Magarshack's celebrated translation\, this is the definitive edition of Dostoevsky's best stories. +2776,The Devil and Other Stories (Oxford World's Classics),Leo Tolstoy,2003,OUP Oxford,5.00,'It is impossible to explain why Yevgeny chose Liza Annenskaya\, as it is always impossible to explain why a man chooses this and not that woman.' This collection of eleven stories spans virtually the whole of Tolstoy's creative life. While each is unique in form\, as a group they are representative of his style\, and touch on the central themes that surface in War and Peace and Anna Karenina. Stories as different as 'The Snowstorm'\, 'Lucerne'\, 'The Diary of a Madman'\, and 'The Devil' are grounded in autobiographical experience. They deal with journeys of self-discovery and the moral and religious questioning that characterizes Tolstoy's works of criticism and philosophy. 'Strider' and 'Father Sergy'\, as well as reflecting Tolstoy's own experiences\, also reveal profound psychological insights. These stories range over much of the Russian world of the nineteenth century\, from the nobility to the peasantry\, the military to the clergy\, from merchants and cobblers to a horse and a tree. Together they present a fascinating picture of Tolstoy's skill and artistry. ABOUT THE SERIES: For over 100 years Oxford World's Classics has made available the widest range of literature from around the globe. Each affordable volume reflects Oxford's commitment to scholarship\, providing the most accurate text plus a wealth of other valuable features\, including expert introductions by leading authorities\, helpful notes to clarify the text\, up-to-date bibliographies for further study\, and much more. +4231,The Dispossessed,Ursula K. Le Guin,1974,Harpercollins,4.26,Frequently reissued with the same ISBN\, but with slightly differing bibliographical details. +7480,The Hobbit,J. R. R. 
Tolkien,2012,Mariner Books,4.64,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +6405,The Hobbit or There and Back Again,J. R. R. Tolkien,2012,Mariner Books,4.63,Celebrating 75 years of one of the world's most treasured classics with an all new trade paperback edition. Repackaged with new cover art. 500\,000 first printing. +2540,The Inspector General (Language - Russian) (Russian Edition),[Nicolai Gogol, Thomas Seltzer],2014,CreateSpace,3.50,The Inspector-General is a national institution. To place a purely literary valuation upon it and call it the greatest of Russian comedies would not convey the significance of its position either in Russian literature or in Russian life itself. There is no other single work in the modern literature of any language that carries with it the wealth of associations which the Inspector-General does to the educated Russian. +2951,The Insulted and Injured,Fyodor Dostoevsky,2011,Wm. B. Eerdmans Publishing,4.00,The Insulted and Injured\, which came out in 1861\, was Fyodor Dostoevsky's first major work of fiction after his Siberian exile and the first of the long novels that made him famous. Set in nineteenth-century Petersburg\, this gripping novel features a vividly drawn set of characters - including Vanya (Dostoevsky's semi-autobiographical hero)\, Natasha (the woman he loves)\, and Alyosha (Natasha's aristocratic lover) - all suffering from the cruelly selfish machinations of Alyosha's father\, the dark and powerful Prince Valkovsky. Boris Jakim's fresh English-language rendering of this gem in the Doestoevsky canon is both more colorful and more accurate than any earlier translation. --from back cover. +2130,The J. R. R. 
Tolkien Audio Collection,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,HarperCollins Publishers,4.89,For generations\, J R R Tolkien's words have brought to thrilling life a world of hobbits\, magic\, and historic myth\, woken from its foggy slumber within our minds. Here\, he tells the tales in his own voice. +9801,The Karamazov Brothers (Oxford World's Classics),Fyodor Dostoevsky,2008,Oxford University Press,4.40,A remarkable work showing the author's power to depict Russian character and his understanding of human nature. Driven by intense\, uncontrollable emotions of rage and revenge\, the four Karamazov brothers all become involved in the brutal murder of their despicable father. +5469,The Lays of Beleriand,[John Ronald Reuel Tolkien, Christopher Tolkien],2002,Harpercollins Pub Limited,4.42,The third volume that contains the early myths and legends which led to the writing of Tolkien's epic tale of war\, The Silmarillion. This\, the third volume of The History of Middle-earth\, gives us a priviledged insight into the creation of the mythology of Middle-earth\, through the alliterative verse tales of two of the most crucial stories in Tolkien's world -- those of Turien and Luthien. The first of the poems is the unpublished Lay of The Children of Hurin\, narrating on a grand scale the tragedy of Turin Turambar. The second is the moving Lay of Leithian\, the chief source of the tale of Beren and Luthien in The Silmarillion\, telling of the Quest of the Silmaril and the encounter with Morgoth in his subterranean fortress. Accompanying the poems are commentaries on the evolution of the history of the Elder Days. Also included is the notable criticism of The Lay of The Leithian by CS Lewis\, who read the poem in 1929. +2675,The Lord of the Rings - Boxed Set,J.R.R. Tolkien,2012,HarperCollins,4.56,This beautiful gift edition of The Hobbit\, J.R.R. 
Tolkien's classic prelude to his Lord of the Rings trilogy\, features cover art\, illustrations\, and watercolor paintings by the artist Alan Lee. Bilbo Baggins is a hobbit who enjoys a comfortable\, unambitious life\, rarely traveling any farther than his pantry or cellar. But his contentment is disturbed when the wizard Gandalf and a company of dwarves arrive on his doorstep one day to whisk him away on an adventure. They have launched a plot to raid the treasure hoard guarded by Smaug the Magnificent\, a large and very dangerous dragon. Bilbo reluctantly joins their quest\, unaware that on his journey to the Lonely Mountain he will encounter both a magic ring and a frightening creature known as Gollum. Written for J.R.R. Tolkien's own children\, The Hobbit has sold many millions of copies worldwide and established itself as a modern classic. +7140,The Lord of the Rings Poster Collection: Six Paintings by Alan Lee (No. 1),[J. R. R. Tolkien, Alan Lee],2002,HarperSport,4.75,A selection of stunning poster paintings from the celebrated Tolkien artist Alan Lee - the man behind many of the striking images from The Lord of The Rings movie. The 50 paintings contained within the centenary edition of The Lord of the Rings in 1992 have themselves become classics and Alan Lee's interpretations are hailed as the most faithful to Tolkien's own vision. This new poster collection\, a perfect complement to volume one\, reproduces six more of the most popular paintings from the book in a format suitable either for hanging as posters or mounting and framing. +5127,The Overcoat, Nikolai Gogol,1992,Courier Corporation,3.75,Four short stories include a satirical tale of Russian bureaucrats and a portrayal of an elderly couple living in the secluded countryside. +8875,The Two Towers,John Ronald Reuel Tolkien,2007,HarperCollins UK,4.64,The second volume in The Lord of the Rings\, This title is also available as a film. 
+4977,The Unvanquished,William Faulkner,2011,Vintage,3.50,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +4382,The Wolves of Witchmaker,Carole Guinane,2001,iUniverse,5.00,Polly Lavender is mysteriously lured onto Witchmaker's grounds along with her best friends Tony Rico\, Gracie Reene\, and Zeus\, the wolf they rescued as a pup. The three must quickly learn to master the art of magic because they have been chosen to lead Witchmaker Prep against a threat that has grim consequences. +7912,The Word For World is Forest,Ursula K. Le Guin,2015,Gollancz,4.22,When the inhabitants of a peaceful world are conquered by the bloodthirsty yumens\, their existence is irrevocably altered. Forced into servitude\, the Athsheans find themselves at the mercy of their brutal masters. Desperation causes the Athsheans\, led by Selver\, to retaliate against their captors\, abandoning their strictures against violence. But in defending their lives\, they have endangered the very foundations of their society. For every blow against the invaders is a blow to the humanity of the Athsheans. And once the killing starts\, there is no turning back. +1211,The brothers Karamazov,Fyodor Dostoevsky,2003,Bantam Classics,1.00,In 1880 Dostoevsky completed The Brothers Karamazov\, the literary effort for which he had been preparing all his life. Compelling\, profound\, complex\, it is the story of a patricide and of the four sons who each had a motive for murder: Dmitry\, the sensualist\, Ivan\, the intellectual\, Alyosha\, the mystic\, and twisted\, cunning Smerdyakov\, the bastard child. Frequently lurid\, nightmarish\, always brilliant\, the novel plunges the reader into a sordid love triangle\, a pathological obsession\, and a gripping courtroom drama. 
But throughout the whole\, Dostoevsky searhes for the truth--about man\, about life\, about the existence of God. A terrifying answer to man's eternal questions\, this monumental work remains the crowning achievement of perhaps the finest novelist of all time. From the Paperback edition. +8086,The grand inquisitor (Milestones of thought),Fyodor Dostoevsky,1981,A&C Black,4.09,Dostoevsky's portrayal of the Catholic Church during the Inquisition is a plea for the power of pure faith\, and a critique of the tyrannies of institutionalized religion. This is an except from the Brothers Karamazov which stands alone as a statement of philiosophy and a warning about the surrender of freedom for the sake of comfort. +8077,The unvanquished,William Faulkner,2011,Vintage,4.00,Set in Mississippi during the Civil War and Reconstruction\, THE UNVANQUISHED focuses on the Sartoris family\, who\, with their code of personal responsibility and courage\, stand for the best of the Old South's traditions. +8480,The wind's twelve quarters: Short stories,Ursula K. Le Guin,2017,HarperCollins,5.00,The recipient of numerous literary prizes\, including the National Book Award\, the Kafka Award\, and the Pushcart Prize\, Ursula K. Le Guin is renowned for her lyrical writing\, rich characters\, and diverse worlds. The Wind's Twelve Quarters collects seventeen powerful stories\, each with an introduction by the author\, ranging from fantasy to intriguing scientific concepts\, from medieval settings to the future. Including an insightful foreword by Le Guin\, describing her experience\, her inspirations\, and her approach to writing\, this stunning collection explores human values\, relationships\, and survival\, and showcases the myriad talents of one of the most provocative writers of our time. 
+2847,To Love A Dark Stranger (Lovegram Historical Romance),Colleen Faulkner,1997,Zebra Books,5.00,Bestselling author Colleen Faulkner's tumultuous saga of royal intrigue and forbidden desire sweeps from the magnificent estates of the aristocracy to the shadowy streets of London to King Charles II's glittering Restoration court. +3293,Universe by Design,Danny Faulkner,2004,New Leaf Publishing Group,4.25,Views the stars and planets from a creationist standpoint\, addresses common misconceptions and difficulties about relativity and cosmology\, and discusses problems with the big bang theory with many analogies\, examples\, diagrams\, and illustrations. Original. +5327,War and Peace,Leo Tolstoy,2016,Lulu.com,3.84,Covering the period from the French invasion under Napoleon into Russia. Although not covering solely the war itself\, the serialized novel does cover the effects the war had on Russian society from the common person right up to the Tsar himself. The book starts to move more to a philosophical consideration on war and peace near the end making the book as a whole an important piece of literature. +4536,War and Peace (Signet Classics),[Leo Tolstoy, Pat Conroy, John Hockenberry],2012,Signet Classics,4.75,Presents the classical epic of the Napoleonic Wars and their effects on four Russian families. +9032,War and Peace: A Novel (6 Volumes),Tolstoy Leo,2013,Hardpress Publishing,3.81,Unlike some other reproductions of classic texts (1) We have not used OCR(Optical Character Recognition)\, as this leads to bad quality books with introduced typos. (2) In books where there are images such as portraits\, maps\, sketches etc We have endeavoured to keep the quality of these images\, so they represent accurately the original artefact. Although occasionally there may be certain imperfections with these old texts\, we feel they deserve to be made available for future generations to enjoy. 
+5119,William Faulkner,William Faulkner,2011,Vintage,4.00,This invaluable volume\, which has been republished to commemorate the one-hundredth anniversary of Faulkner's birth\, contains some of the greatest short fiction by a writer who defined the course of American literature. Its forty-five stories fall into three categories: those not included in Faulkner's earlier collections\, previously unpublished short fiction\, and stories that were later expanded into such novels as The Unvanquished\, The Hamlet\, and Go Down\, Moses. With its Introduction and extensive notes by the biographer Joseph Blotner\, Uncollected Stories of William Faulkner is an essential addition to its author's canon--as well as a book of some of the most haunting\, harrowing\, and atmospheric short fiction written in the twentieth century. +8615,Winter notes on summer impressions,Fyodor Dostoevsky,2018,Alma Books,4.75,In June 1862\, Dostoevsky left Petersburg on his first excursion to Western Europe. Ostensibly making the trip to consult Western specialists about his epilepsy\, he also wished to see first-hand the source of the Western ideas he believed were corrupting Russia. Over the course of his journey he visited a number of major cities\, including Berlin\, Paris\, London\, Florence\, Milan and Vienna.His record of the trip\, Winter Notes on Summer Impressions - first published in the February 1863 issue of Vremya\, the periodical he edited - is the chrysalis out of which many elements of his later masterpieces developed. +6478,Woman-The Full Story: A Dynamic Celebration of Freedoms,Michele Guinness,2003,Zondervan,5.00,What does it mean to be a woman today? What have women inherited from their radical\, risk-taking sisters of the past? And how does God view this half of humanity? 
Michele Guinness invites us on an adventure of discovery\, exploring the biblical texts\, the annals of history and the experiences of women today in search of the challenges and achievements\, failures and joys\, of women throughout the ages. +8678,Worlds of Exile and Illusion: Three Complete Novels of the Hainish Series in One Volume--Rocannon's World\, Planet of Exile\, City of Illusions,Ursula K. Le Guin,2016,Orb Books,4.41,Worlds of Exile and Illusion contains three novels in the Hainish Series from Ursula K. Le Guin\, one of the greatest science fiction writers and many times the winner of the Hugo and Nebula Awards. Her career as a novelist was launched by the three novels contained here. These books\, Rocannon's World\, Planet of Exile\, and City of Illusions\, are set in the same universe as Le Guin's groundbreaking classic\, The Left Hand of Darkness. At the Publisher's request\, this title is being sold without Digital Rights Management Software (DRM) applied. diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json new file mode 100644 index 0000000000000..29e3c94c579b1 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-books.json @@ -0,0 +1,30 @@ +{ + "properties": { + "book_no": { + "type": "keyword" + }, + "title": { + "type": "text" + }, + "author": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword" + } + } + }, + "description": { + "type": "text" + }, + "publisher": { + "type": "text" + }, + "ratings": { + "type": "float" + }, + "year": { + "type": "integer" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec index 574f27b8c1fed..56eded5ce4603 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match-operator.csv-spec @@ -4,65 +4,89 @@ singleMatchWithTextField required_capability: match_operator -from airports | where name match "london" | keep abbrev, name | sort abbrev; +from books | where author match "William Faulkner" | keep book_no, author | sort book_no | LIMIT 5; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton +book_no:keyword | author:text +2378 | [Carol Faulkner, Holly Byers Ochoa, Lucretia Mott] +2713 | William Faulkner +2847 | Colleen Faulkner +2883 | William Faulkner +3293 | Danny Faulkner ; singleMatchWithKeywordField required_capability: match_operator -from airports | where abbrev match "LTN" | keep abbrev, name | sort abbrev; +from books | where author.keyword match "William Faulkner" | keep book_no, author | sort book_no; -abbrev:keyword | name:text -LTN | London Luton +book_no:keyword | author:text +2713 | William Faulkner +2883 | William Faulkner +4724 | William Faulkner +4977 | William Faulkner +5119 | William Faulkner +5404 | William Faulkner +5578 | William Faulkner +8077 | William Faulkner +9896 | William Faulkner ; multipleMatch required_capability: match_operator -from airports | where name match "london" or name match "liverpool "| keep abbrev, name | sort abbrev; +from books +| where (description match "Sauron" OR description match "Dark Lord") AND + (author match "J. R. R. Tolkien" OR author match "John Ronald Reuel Tolkien") +| keep book_no, title, author +| sort book_no +| limit 4 +; -abbrev:keyword | name:text -LGW | London Gatwick -LHR | London Heathrow -LPL | Liverpool John Lennon -LTN | London Luton +book_no:keyword | title:text | author:text +1463 | Realms of Tolkien: Images of Middle-earth | J. R. R. Tolkien +2675 | The Lord of the Rings - Boxed Set | J.R.R. Tolkien +2714 | Return of the King Being the Third Part of The Lord of the Rings | J. R. R. 
Tolkien +2936 | Fellowship of the Ring 2ND Edition | John Ronald Reuel Tolkien ; multipleWhereWithMatch required_capability: match_operator -from airports | where name match "john" | WHERE name match "St" | keep abbrev, name | sort abbrev; +from books +| where title match "short stories" +| where author match "Ursula K. Le Guin" +| keep book_no, title, author +| sort book_no +; -abbrev:keyword | name:text -YXJ | Fort St. John (N. Peace) +book_no:keyword | title:text | author:text +8480 | The wind's twelve quarters: Short stories | Ursula K. Le Guin ; combinedMatchWithFunctions required_capability: match_operator -from airports -| where name match "john" AND country match "Canada" AND scalerank > 5 -| where length(name) > 10 -| keep abbrev, name, country, scalerank -| sort abbrev +from books +| where title match "Tolkien" AND author match "Tolkien" AND year > 2000 +| where mv_count(author) == 1 +| keep book_no, title, author, year +| sort book_no ; -abbrev:keyword | name:text | country:keyword | scalerank: integer -YHM | John C. Munro Hamilton Int'l | Canada | 8 -YXJ | Fort St. John (N. Peace) | Canada | 8 +book_no:keyword | title:text | author:text | year:integer +5335 | Letters of J R R Tolkien | J.R.R. 
Tolkien | 2014 ; matchWithStats required_capability: match_operator -from airports -| where name match "john" AND scalerank > 5 -| where length(name) > 10 -| stats count(*) BY type -| sort type +from books +| where author match "faulkner" AND year > 1990 +| where mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword ; -count(*): long | type:keyword -1 | major -2 | mid +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +8 | William Faulkner ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec index bdc11c78c8f48..2bc2a865c0052 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/match.csv-spec @@ -1,34 +1,47 @@ matchKeywordField required_capability: match_command -from employees | match "first_name: Ma*" | keep emp_no, first_name | sort emp_no; - -emp_no:integer | first_name:keyword -10011 |Mary -10020 |Mayuko -10042 |Magy -10054 |Mayumi -10069 |Margareta +from books | match "author.keyword: *Stein*" | keep book_no, author | sort book_no; + +book_no:keyword | author:text +7381 | Bettilu Stein Faulkner ; -matchMultipleKeywordFields +matchMultipleTextFields required_capability: match_command -from employees | match "+first_name: Ma* +last_name:*man" | keep emp_no, first_name, last_name | sort emp_no; +from books | match "title:Return* AND author:*Tolkien" | keep book_no, title | sort book_no; -emp_no:integer | first_name:keyword | last_name:keyword -10069 |Margareta | Bierman +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +7350 | Return of the Shadow ; -matchTextField +matchAllFields required_capability: match_command -from airports | match "lon*" | keep abbrev, name | sort abbrev; +from books | match 
"dark AND lord AND Sauron" | keep book_no, title | sort book_no; + +book_no:keyword | title:text +2714 | Return of the King Being the Third Part of The Lord of the Rings +2936 | Fellowship of the Ring 2ND Edition +; + +matchWithWhereFunctionsAndStats +required_capability: match_command + +from books +| match "Faulkner AND ratings:>4.0" +| where year > 1950 and mv_count(author) == 1 +| stats count(*) BY author.keyword +| sort author.keyword +; -abbrev:keyword | name:text -CGQ | Changchun Longjia Int'l -LGW | London Gatwick -LHR | London Heathrow -LTN | London Luton -LYR | Svalbard Longyear +count(*): long | author.keyword:keyword +1 | Bettilu Stein Faulkner +2 | Colleen Faulkner +1 | Danny Faulkner +1 | Keith Faulkner +1 | Paul Faulkner +1 | William Faulkner ; From f152839faf082b5f93ecd718d4c297584c545ffe Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 09:11:14 +0100 Subject: [PATCH 08/46] Remove `InterruptedEx.` from snapshot test harness (#112228) Relates #111957 --- .../snapshots/AbstractSnapshotIntegTestCase.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 1b49209b49c7f..1656a09daa123 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -288,7 +288,7 @@ public static void failReadsAllDataNodes(String repository) { } } - public static void waitForBlockOnAnyDataNode(String repository) throws InterruptedException { + public static void waitForBlockOnAnyDataNode(String repository) { final boolean blocked = waitUntil(() -> { for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { MockRepository mockRepository = 
(MockRepository) repositoriesService.repository(repository); @@ -475,13 +475,13 @@ protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, Li return createSnapshot(repositoryName, snapshot, indices, Collections.singletonList(NO_FEATURE_STATES_VALUE)); } - protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException { + protected void createIndexWithRandomDocs(String indexName, int docCount) { createIndex(indexName); ensureGreen(); indexRandomDocs(indexName, docCount); } - protected void indexRandomDocs(String index, int numdocs) throws InterruptedException { + protected void indexRandomDocs(String index, int numdocs) { logger.info("--> indexing [{}] documents into [{}]", numdocs, index); IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs]; for (int i = 0; i < builders.length; i++) { From 9db177887820c2a210aea1c041a88c162754f034 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 27 Aug 2024 09:21:43 +0100 Subject: [PATCH 09/46] Use StreamOutput::writeWriteable instead of writeTo directly (#112027) --- .../lifecycle/action/GetDataStreamLifecycleStatsAction.java | 2 +- .../src/main/java/org/elasticsearch/cluster/ClusterState.java | 2 +- .../elasticsearch/cluster/version/CompatibilityVersions.java | 2 +- .../elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java | 2 +- .../elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java | 2 +- .../org/elasticsearch/xpack/esql/session/Configuration.java | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java index 6e930defd4e0b..71f07c8cac668 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java +++ 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleStatsAction.java @@ -76,7 +76,7 @@ public Response(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVLong(runDuration); out.writeOptionalVLong(timeBetweenStarts); - out.writeCollection(dataStreamStats, (o, v) -> v.writeTo(o)); + out.writeCollection(dataStreamStats, StreamOutput::writeWriteable); } public Long getRunDuration() { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c54269da68507..30e9a9a3779d7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -1081,7 +1081,7 @@ public void writeTo(StreamOutput out) throws IOException { routingTable.writeTo(out); nodes.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput)); + out.writeMap(compatibilityVersions, StreamOutput::writeWriteable); } if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { clusterFeatures.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java index c1489afc6c369..8ebb24e86105a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -120,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { TransportVersion.writeVersion(this.transportVersion(), out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_11_X)) { - out.writeMap(this.systemIndexMappingsVersion(), (o, v) -> v.writeTo(o)); + 
out.writeMap(this.systemIndexMappingsVersion(), StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java index c1903a2910629..5a91e997ca5fe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java @@ -108,6 +108,6 @@ public Map getUsageStatsByTier() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(usageStatsByTier, (o, v) -> v.writeTo(o)); + out.writeMap(usageStatsByTier, StreamOutput::writeWriteable); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index e976a8d9be48e..cec4a5a3509a1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -332,7 +332,7 @@ private static class LookupResponse extends TransportResponse { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, (o, v) -> v.writeTo(o)); + out.writeMap(policies, StreamOutput::writeWriteable); out.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java index a2777c97e919a..33a48d2e7df05 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/Configuration.java @@ 
-117,7 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(profile); } if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_REQUEST_TABLES)) { - out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, (o2, column) -> column.writeTo(o2))); + out.writeMap(tables, (o1, columns) -> o1.writeMap(columns, StreamOutput::writeWriteable)); } } From 25fdcd29276c508e5c69f6d855dc97daed8cfc08 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Tue, 27 Aug 2024 11:22:52 +0200 Subject: [PATCH 10/46] ES|QL: cache EsField on serialization (#112008) As a follow-up to https://github.com/elastic/elasticsearch/pull/111447, with this change we also cache `EsFields`. This gives us an additional 30-40% reduction on the size of serialized plan, according to [these tests](https://github.com/elastic/elasticsearch/pull/111980) Related to https://github.com/elastic/elasticsearch/issues/111358 --- .../org/elasticsearch/TransportVersions.java | 1 + .../test/AbstractWireSerializingTestCase.java | 9 ++- .../esql/core/expression/FieldAttribute.java | 4 +- .../xpack/esql/core/type/DateEsField.java | 14 ++--- .../xpack/esql/core/type/EsField.java | 61 +++++++++++++------ .../esql/core/type/InvalidMappedField.java | 21 ++----- .../xpack/esql/core/type/KeywordEsField.java | 17 ++---- .../esql/core}/type/MultiTypeEsField.java | 18 ++---- .../xpack/esql/core/type/TextEsField.java | 13 ++-- .../esql/core/type/UnsupportedEsField.java | 15 ++--- .../xpack/esql/core/util/PlanStreamInput.java | 3 + .../esql/core/util/PlanStreamOutput.java | 10 +++ .../xpack/esql/analysis/Analyzer.java | 2 +- .../esql/enrich/EnrichPolicyResolver.java | 13 ++-- .../esql/enrich/ResolvedEnrichPolicy.java | 2 +- .../function/UnsupportedAttribute.java | 12 +++- .../xpack/esql/index/EsIndex.java | 8 +-- .../xpack/esql/io/stream/PlanStreamInput.java | 49 ++++++++++++++- .../esql/io/stream/PlanStreamOutput.java | 38 ++++++++++++ .../planner/EsPhysicalOperationProviders.java | 2 +- 
.../xpack/esql/plugin/EsqlPlugin.java | 4 -- .../xpack/esql/SerializationTestUtils.java | 2 - .../AbstractExpressionSerializationTests.java | 2 - .../xpack/esql/expression/AliasTests.java | 2 - .../function/AbstractAttributeTestCase.java | 2 - .../function/FieldAttributeTests.java | 2 +- .../function/UnsupportedAttributeTests.java | 2 +- .../esql/index/EsIndexSerializationTests.java | 32 ++++++---- .../esql/io/stream/PlanNamedTypesTests.java | 2 +- .../esql/io/stream/PlanStreamOutputTests.java | 37 ++++++++++- ...AbstractLogicalPlanSerializationTests.java | 2 - ...bstractPhysicalPlanSerializationTests.java | 2 - .../ExchangeSinkExecSerializationTests.java | 12 ++-- .../esql}/type/AbstractEsFieldTypeTests.java | 42 ++++++++++--- .../esql}/type/DataTypeConversionTests.java | 5 +- .../xpack/esql}/type/DateEsFieldTests.java | 5 +- .../xpack/esql}/type/EsFieldTests.java | 5 +- .../esql}/type/InvalidMappedFieldTests.java | 5 +- .../xpack/esql}/type/KeywordEsFieldTests.java | 4 +- .../esql/type/MultiTypeEsFieldTests.java | 16 ++--- .../xpack/esql}/type/TextEsFieldTests.java | 5 +- .../esql}/type/UnsupportedEsFieldTests.java | 5 +- 42 files changed, 336 insertions(+), 171 deletions(-) rename x-pack/plugin/{esql/src/main/java/org/elasticsearch/xpack/esql => esql-core/src/main/java/org/elasticsearch/xpack/esql/core}/type/MultiTypeEsField.java (86%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/AbstractEsFieldTypeTests.java (57%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DataTypeConversionTests.java (99%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/DateEsFieldTests.java (89%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => 
esql/src/test/java/org/elasticsearch/xpack/esql}/type/EsFieldTests.java (91%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/InvalidMappedFieldTests.java (90%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/KeywordEsFieldTests.java (92%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/TextEsFieldTests.java (90%) rename x-pack/plugin/{esql-core/src/test/java/org/elasticsearch/xpack/esql/core => esql/src/test/java/org/elasticsearch/xpack/esql}/type/UnsupportedEsFieldTests.java (91%) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 78f1b21ea7a44..33f483c57b54e 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -197,6 +197,7 @@ static TransportVersion def(int id) { public static final TransportVersion LTR_SERVERLESS_RELEASE = def(8_727_00_0); public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); + public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java index 82d7f98f34301..b4503a69acca3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractWireSerializingTestCase.java @@ -25,11 +25,18 @@ public abstract class AbstractWireSerializingTestCase exten */ protected abstract Writeable.Reader instanceReader(); + /** + * Returns a {@link Writeable.Writer} that will be used to serialize the instance + */ + protected Writeable.Writer instanceWriter() { + return StreamOutput::writeWriteable; + } + /** * Copy the {@link Writeable} by round tripping it through {@linkplain StreamInput} and {@linkplain StreamOutput}. */ @Override protected final T copyInstance(T instance, TransportVersion version) throws IOException { - return copyWriteable(instance, getNamedWriteableRegistry(), instanceReader(), version); + return copyInstance(instance, getNamedWriteableRegistry(), instanceWriter(), instanceReader(), version); } } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java index 8e8973a11bc8a..37f2cf863d53e 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/expression/FieldAttribute.java @@ -112,7 +112,7 @@ private FieldAttribute(StreamInput in) throws IOException { in.readOptionalWriteable(FieldAttribute::readFrom), in.readString(), DataType.readFrom(in), - in.readNamedWriteable(EsField.class), + EsField.readFrom(in), 
in.readOptionalString(), in.readEnum(Nullability.class), NameId.readFrom((StreamInput & PlanStreamInput) in), @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(parent); out.writeString(name()); dataType().writeTo(out); - out.writeNamedWriteable(field); + field.writeTo(out); // We used to write the qualifier here. We can still do if needed in the future. out.writeOptionalString(null); out.writeEnum(nullable()); diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java index 01728954a2e1b..f829bcdea94e4 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/DateEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,7 +16,6 @@ * Information about a field in an ES index with the {@code date} type */ public class DateEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "DateEsField", DateEsField::new); public static DateEsField dateEsField(String name, Map properties, boolean hasDocValues) { return new DateEsField(name, DataType.DATETIME, properties, hasDocValues); @@ -27,19 +25,19 @@ private DateEsField(String name, DataType dataType, Map propert super(name, dataType, properties, hasDocValues); } - private DateEsField(StreamInput in) throws IOException { - this(in.readString(), DataType.DATETIME, in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean()); + protected DateEsField(StreamInput in) throws IOException { + this(in.readString(), 
DataType.DATETIME, in.readImmutableMap(EsField::readFrom), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "DateEsField"; } + } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java index eb17d720d2140..899986fecd012 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/EsField.java @@ -7,34 +7,40 @@ package org.elasticsearch.xpack.esql.core.type; import org.elasticsearch.TransportVersions; -import org.elasticsearch.common.io.stream.NamedWriteable; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; +import org.elasticsearch.xpack.esql.core.util.PlanStreamInput; +import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import java.io.IOException; -import java.util.List; import java.util.Map; import java.util.Objects; /** * Information about a field in an ES index. 
*/ -public class EsField implements NamedWriteable { - public static List getNamedWriteables() { - return List.of( - EsField.ENTRY, - DateEsField.ENTRY, - InvalidMappedField.ENTRY, - KeywordEsField.ENTRY, - TextEsField.ENTRY, - UnsupportedEsField.ENTRY - ); +public class EsField implements Writeable { + + private static Map> readers = Map.ofEntries( + Map.entry("EsField", EsField::new), + Map.entry("DateEsField", DateEsField::new), + Map.entry("InvalidMappedField", InvalidMappedField::new), + Map.entry("KeywordEsField", KeywordEsField::new), + Map.entry("MultiTypeEsField", MultiTypeEsField::new), + Map.entry("TextEsField", TextEsField::new), + Map.entry("UnsupportedEsField", UnsupportedEsField::new) + ); + + public static Writeable.Reader getReader(String name) { + Reader result = readers.get(name); + if (result == null) { + throw new IllegalArgumentException("Invalid EsField type [" + name + "]"); + } + return result; } - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "EsField", EsField::new); - private final DataType esDataType; private final boolean aggregatable; private final Map properties; @@ -53,10 +59,10 @@ public EsField(String name, DataType esDataType, Map properties this.isAlias = isAlias; } - public EsField(StreamInput in) throws IOException { + protected EsField(StreamInput in) throws IOException { this.name = in.readString(); this.esDataType = readDataType(in); - this.properties = in.readImmutableMap(i -> i.readNamedWriteable(EsField.class)); + this.properties = in.readImmutableMap(EsField::readFrom); this.aggregatable = in.readBoolean(); this.isAlias = in.readBoolean(); } @@ -77,18 +83,33 @@ private DataType readDataType(StreamInput in) throws IOException { return DataType.readFrom(name); } + public static A readFrom(StreamInput in) throws IOException { + return ((PlanStreamInput) in).readEsFieldWithCache(); + } + @Override public void writeTo(StreamOutput out) throws IOException { + if 
(((PlanStreamOutput) out).writeEsFieldCacheHeader(this)) { + writeContent(out); + } + } + + /** + * This needs to be overridden by subclasses for specific serialization + */ + protected void writeContent(StreamOutput out) throws IOException { out.writeString(name); esDataType.writeTo(out); - out.writeMap(properties, StreamOutput::writeNamedWriteable); + out.writeMap(properties, (o, x) -> x.writeTo(out)); out.writeBoolean(aggregatable); out.writeBoolean(isAlias); } - @Override + /** + * This needs to be overridden by subclasses for specific serialization + */ public String getWriteableName() { - return ENTRY.name; + return "EsField"; } /** diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java index 8b15893f8a056..d34af0f8565c7 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedField.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.QlIllegalArgumentException; @@ -27,11 +26,6 @@ * It is used specifically for the 'union types' feature in ES|QL. 
*/ public class InvalidMappedField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "InvalidMappedField", - InvalidMappedField::new - ); private final String errorMessage; private final Map> typesToIndices; @@ -44,10 +38,6 @@ public InvalidMappedField(String name, String errorMessage) { this(name, errorMessage, new TreeMap<>()); } - public InvalidMappedField(String name) { - this(name, StringUtils.EMPTY, new TreeMap<>()); - } - /** * Constructor supporting union types, used in ES|QL. */ @@ -61,8 +51,8 @@ private InvalidMappedField(String name, String errorMessage, Map i.readNamedWriteable(EsField.class))); + protected InvalidMappedField(StreamInput in) throws IOException { + this(in.readString(), in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom)); } public Set types() { @@ -70,15 +60,14 @@ public Set types() { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(errorMessage); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "InvalidMappedField"; } public String errorMessage() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java index d856e3d9d8297..33dcebaf3dec2 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/KeywordEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -21,11 +20,6 @@ * Information about a field in an ES index with the {@code keyword} type. */ public class KeywordEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "KeywordEsField", - KeywordEsField::new - ); private final int precision; private final boolean normalized; @@ -63,11 +57,11 @@ protected KeywordEsField( this.normalized = normalized; } - private KeywordEsField(StreamInput in) throws IOException { + public KeywordEsField(StreamInput in) throws IOException { this( in.readString(), KEYWORD, - in.readMap(i -> i.readNamedWriteable(EsField.class)), + in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readInt(), in.readBoolean(), @@ -76,18 +70,17 @@ private KeywordEsField(StreamInput in) throws IOException { } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeInt(precision); out.writeBoolean(normalized); out.writeBoolean(isAlias()); } - @Override public String getWriteableName() { - return ENTRY.name; + return "KeywordEsField"; } public int getPrecision() { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java similarity index 86% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java rename to x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java index 8b2fc926379f2..81dc77eddcdf8 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/MultiTypeEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/MultiTypeEsField.java @@ -5,15 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.type; +package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.core.expression.Expression; -import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.io.IOException; import java.util.HashMap; @@ -31,11 +27,6 @@ * type conversion is done at the data node level. */ public class MultiTypeEsField extends EsField { - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "MultiTypeEsField", - MultiTypeEsField::new - ); private final Map indexToConversionExpressions; @@ -44,21 +35,20 @@ public MultiTypeEsField(String name, DataType dataType, boolean aggregatable, Ma this.indexToConversionExpressions = indexToConversionExpressions; } - public MultiTypeEsField(StreamInput in) throws IOException { + protected MultiTypeEsField(StreamInput in) throws IOException { this(in.readString(), DataType.readFrom(in), in.readBoolean(), in.readImmutableMap(i -> i.readNamedWriteable(Expression.class))); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getDataType().typeName()); out.writeBoolean(isAggregatable()); out.writeMap(getIndexToConversionExpressions(), (o, v) -> out.writeNamedWriteable(v)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "MultiTypeEsField"; } public Map 
getIndexToConversionExpressions() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java index c52230fa65829..0f2f136e74423 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/TextEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Tuple; @@ -23,7 +22,6 @@ * Information about a field in an es index with the {@code text} type. */ public class TextEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(EsField.class, "TextEsField", TextEsField::new); public TextEsField(String name, Map properties, boolean hasDocValues) { this(name, properties, hasDocValues, false); @@ -33,21 +31,20 @@ public TextEsField(String name, Map properties, boolean hasDocV super(name, TEXT, properties, hasDocValues, isAlias); } - private TextEsField(StreamInput in) throws IOException { - this(in.readString(), in.readMap(i -> i.readNamedWriteable(EsField.class)), in.readBoolean(), in.readBoolean()); + protected TextEsField(StreamInput in) throws IOException { + this(in.readString(), in.readImmutableMap(EsField::readFrom), in.readBoolean(), in.readBoolean()); } @Override - public void writeTo(StreamOutput out) throws IOException { + protected void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); out.writeBoolean(isAggregatable()); out.writeBoolean(isAlias()); } - 
@Override public String getWriteableName() { - return ENTRY.name; + return "TextEsField"; } @Override diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java index 13e4d6ad953a8..13ee2b42a321b 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsField.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.esql.core.type; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,11 +19,6 @@ * All the subfields (properties) of an unsupported type are also be unsupported. */ public class UnsupportedEsField extends EsField { - static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - EsField.class, - "UnsupportedEsField", - UnsupportedEsField::new - ); private final String originalType; private final String inherited; // for fields belonging to parents (or grandparents) that have an unsupported type @@ -40,20 +34,19 @@ public UnsupportedEsField(String name, String originalType, String inherited, Ma } public UnsupportedEsField(StreamInput in) throws IOException { - this(in.readString(), in.readString(), in.readOptionalString(), in.readMap(i -> i.readNamedWriteable(EsField.class))); + this(in.readString(), in.readString(), in.readOptionalString(), in.readImmutableMap(EsField::readFrom)); } @Override - public void writeTo(StreamOutput out) throws IOException { + public void writeContent(StreamOutput out) throws IOException { out.writeString(getName()); out.writeString(getOriginalType()); out.writeOptionalString(getInherited()); - out.writeMap(getProperties(), StreamOutput::writeNamedWriteable); + 
out.writeMap(getProperties(), (o, x) -> x.writeTo(out)); } - @Override public String getWriteableName() { - return ENTRY.name; + return "UnsupportedEsField"; } public String getOriginalType() { diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java index 01a153feeb473..471c9476ad31d 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamInput.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -44,4 +45,6 @@ public interface PlanStreamInput { * @throws IOException */ A readAttributeWithCache(CheckedFunction constructor) throws IOException; + + A readEsFieldWithCache() throws IOException; } diff --git a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java index cec68c06e492e..4c30cb66e9f86 100644 --- a/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java +++ b/x-pack/plugin/esql-core/src/main/java/org/elasticsearch/xpack/esql/core/util/PlanStreamOutput.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.core.util; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.io.IOException; @@ -21,4 +22,13 @@ public interface PlanStreamOutput { * @throws IOException */ boolean writeAttributeCacheHeader(Attribute attribute) throws IOException; + + /** + * Writes a cache 
header for an {@link org.elasticsearch.xpack.esql.core.type.EsField} and caches it if it is not already in the cache. + * In that case, the field will have to serialize itself into this stream immediately after this method call. + * @param field The EsField to serialize + * @return true if the field needs to serialize itself, false otherwise (i.e. if already cached) + * @throws IOException + */ + boolean writeEsFieldCacheHeader(EsField field) throws IOException; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index 5b59117ad356b..f88c603b4cacb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -43,6 +43,7 @@ import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.CollectionUtils; import org.elasticsearch.xpack.esql.core.util.Holder; @@ -80,7 +81,6 @@ import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.stats.FeatureMetric; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index cec4a5a3509a1..f77bfa6d3f862 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -40,6 +40,9 @@ import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.util.StringUtils; import org.elasticsearch.xpack.esql.index.EsIndex; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.session.IndexResolver; @@ -326,14 +329,16 @@ private static class LookupResponse extends TransportResponse { } LookupResponse(StreamInput in) throws IOException { - this.policies = in.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); - this.failures = in.readMap(StreamInput::readString, StreamInput::readString); + PlanStreamInput planIn = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), null); + this.policies = planIn.readMap(StreamInput::readString, ResolvedEnrichPolicy::new); + this.failures = planIn.readMap(StreamInput::readString, StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(policies, StreamOutput::writeWriteable); - out.writeMap(failures, StreamOutput::writeString); + PlanStreamOutput pso = new PlanStreamOutput(out, new PlanNameRegistry(), null); + pso.writeMap(policies, StreamOutput::writeWriteable); + pso.writeMap(failures, StreamOutput::writeString); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java index 44443973764e6..63f22bd40ac39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/ResolvedEnrichPolicy.java 
@@ -29,7 +29,7 @@ public ResolvedEnrichPolicy(StreamInput in) throws IOException { in.readString(), in.readStringCollectionAsList(), in.readMap(StreamInput::readString), - in.readMap(EsField::new) + in.readMap(EsField::readFrom) ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java index 78577aa2b91e0..5961d1c21bb02 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttribute.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.expression.function; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,6 +21,7 @@ import org.elasticsearch.xpack.esql.core.tree.NodeInfo; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import org.elasticsearch.xpack.esql.core.util.PlanStreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -75,7 +77,9 @@ private UnsupportedAttribute(StreamInput in) throws IOException { this( Source.readFrom((PlanStreamInput) in), in.readString(), - new UnsupportedEsField(in), + in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION) + ? 
EsField.readFrom(in) + : new UnsupportedEsField(in), in.readOptionalString(), NameId.readFrom((PlanStreamInput) in) ); @@ -86,7 +90,11 @@ public void writeTo(StreamOutput out) throws IOException { if (((PlanStreamOutput) out).writeAttributeCacheHeader(this)) { Source.EMPTY.writeTo(out); out.writeString(name()); - field().writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + field().writeTo(out); + } else { + field().writeContent(out); + } out.writeOptionalString(hasCustomMessage ? message : null); id().writeTo(out); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java index 92fa2f76ec8b2..d368c570a3f76 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/index/EsIndex.java @@ -36,17 +36,13 @@ public EsIndex(String name, Map mapping, Set concreteIn @SuppressWarnings("unchecked") public EsIndex(StreamInput in) throws IOException { - this( - in.readString(), - in.readImmutableMap(StreamInput::readString, i -> i.readNamedWriteable(EsField.class)), - (Set) in.readGenericValue() - ); + this(in.readString(), in.readImmutableMap(StreamInput::readString, EsField::readFrom), (Set) in.readGenericValue()); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name()); - out.writeMap(mapping(), StreamOutput::writeNamedWriteable); + out.writeMap(mapping(), (o, x) -> x.writeTo(out)); out.writeGenericValue(concreteIndices()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java index c8e744dfff054..ad66378da5d9e 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -28,6 +29,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.NameId; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanNamedReader; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanReader; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -65,6 +67,8 @@ public NameId apply(long streamNameId) { private Attribute[] attributesCache = new Attribute[64]; + private EsField[] esFieldsCache = new EsField[64]; + private final PlanNameRegistry registry; // hook for nameId, where can cache and map, for now just return a NameId of the same long value. @@ -239,7 +243,7 @@ private Attribute attributeFromCache(int id) throws IOException { } /** - * Add and attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * Add an attribute to the cache, based on the serialization ID generated by {@link PlanStreamOutput} * @param id The ID that will reference the attribute. 
Generated at serialization time * @param attr The attribute to cache */ @@ -250,4 +254,47 @@ private void cacheAttribute(int id, Attribute attr) { } attributesCache[id] = attr; } + + @SuppressWarnings("unchecked") + public A readEsFieldWithCache() throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + // it's safe to cast to int, since the max value for this is {@link PlanStreamOutput#MAX_SERIALIZED_ATTRIBUTES} + int cacheId = Math.toIntExact(readZLong()); + if (cacheId < 0) { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + cacheId = -1 - cacheId; + EsField result = reader.read(this); + cacheEsField(cacheId, result); + return (A) result; + } else { + return (A) esFieldFromCache(cacheId); + } + } else { + String className = readString(); + Writeable.Reader reader = EsField.getReader(className); + return (A) reader.read(this); + } + } + + private EsField esFieldFromCache(int id) throws IOException { + if (esFieldsCache[id] == null) { + throw new IOException("Attribute ID not found in serialization cache [" + id + "]"); + } + return esFieldsCache[id]; + } + + /** + * Add an EsField to the cache, based on the serialization ID generated by {@link PlanStreamOutput} + * @param id The ID that will reference the field. 
Generated at serialization time + * @param field The EsField to cache + */ + private void cacheEsField(int id, EsField field) { + assert id >= 0; + if (id >= esFieldsCache.length) { + esFieldsCache = ArrayUtil.grow(esFieldsCache); + } + esFieldsCache[id] = field; + } + } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index f918621d87a24..d76c61eac05d1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.esql.Column; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Attribute; +import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.session.Configuration; @@ -62,6 +63,11 @@ public final class PlanStreamOutput extends StreamOutput implements org.elastics */ protected final Map cachedAttributes = new IdentityHashMap<>(); + /** + * Cache for EsFields. 
+ */ + protected final Map cachedEsFields = new IdentityHashMap<>(); + private final StreamOutput delegate; private final PlanNameRegistry registry; @@ -205,6 +211,38 @@ private int cacheAttribute(Attribute attr) { return id; } + @Override + public boolean writeEsFieldCacheHeader(EsField field) throws IOException { + if (getTransportVersion().onOrAfter(TransportVersions.ESQL_ES_FIELD_CACHED_SERIALIZATION)) { + Integer cacheId = esFieldIdFromCache(field); + if (cacheId != null) { + writeZLong(cacheId); + return false; + } + + cacheId = cacheEsField(field); + writeZLong(-1 - cacheId); + } + writeString(field.getWriteableName()); + return true; + } + + private Integer esFieldIdFromCache(EsField field) { + return cachedEsFields.get(field); + } + + private int cacheEsField(EsField attr) { + if (cachedEsFields.containsKey(attr)) { + throw new IllegalArgumentException("EsField already present in the serialization cache [" + attr + "]"); + } + int id = cachedEsFields.size(); + if (id >= maxSerializedAttributes) { + throw new InvalidArgumentException("Limit of the number of serialized EsFields exceeded [{}]", maxSerializedAttributes); + } + cachedEsFields.put(attr, id); + return id; + } + /** * The byte representing a {@link Block} sent for the first time. 
The byte * will be followed by a {@link StreamOutput#writeVInt} encoded identifier diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index 45989b4f563ce..8fddb7407a02a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -52,6 +52,7 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -60,7 +61,6 @@ import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.DriverParallelism; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.io.IOException; import java.util.ArrayList; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index b55c5f604023f..f0686baf68f6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -61,7 +61,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import 
org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; @@ -70,7 +69,6 @@ import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.IndexResolver; -import org.elasticsearch.xpack.esql.type.MultiTypeEsField; import java.lang.invoke.MethodHandles; import java.util.ArrayList; @@ -193,14 +191,12 @@ public List getNamedWriteables() { entries.add(AsyncOperator.Status.ENTRY); entries.add(EnrichLookupOperator.Status.ENTRY); entries.addAll(Block.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); // TODO combine with above once these are in the same project entries.addAll(NamedExpression.getNamedWriteables()); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); // TODO combine with above once these are in the same project entries.addAll(Expression.getNamedWriteables()); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); // TODO combine with above once these are in the same project - entries.add(MultiTypeEsField.ENTRY); // TODO combine with EsField.getNamedWriteables() once these are in the same module entries.addAll(EsqlScalarFunction.getNamedWriteables()); entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(LogicalPlan.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index d8de034111865..339e7159ed87d 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.EsqlScalarFunction; @@ -119,7 +118,6 @@ public static NamedWriteableRegistry writableRegistry() { entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new)); entries.add(new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new)); entries.add(SingleValueQuery.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); entries.addAll(NamedExpression.getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java index 7a00f8ef154ce..596ff2af5fb5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AbstractExpressionSerializationTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import 
org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; @@ -37,7 +36,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.add(UnsupportedAttribute.ENTRY); entries.add(UnsupportedAttribute.NAMED_EXPRESSION_ENTRY); entries.add(UnsupportedAttribute.EXPRESSION_ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.add(org.elasticsearch.xpack.esql.expression.Order.ENTRY); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java index 36f8b43e69378..2a6791a1f5300 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/AliasTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.SourceTests; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.ReferenceAttributeTests; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; @@ -81,7 +80,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(NamedExpression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java index c625ae5dfb61b..76b813f08d818 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAttributeTestCase.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Attribute; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; @@ -56,7 +55,6 @@ protected final ExtraAttribute mutateInstance(ExtraAttribute instance) { protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(Attribute.getNamedWriteables()); entries.add(UnsupportedAttribute.ENTRY); - entries.addAll(EsField.getNamedWriteables()); return new NamedWriteableRegistry(entries); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java index 03befe66ac28e..8090a20ddc836 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/FieldAttributeTests.java @@ -11,9 +11,9 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.expression.Nullability; import 
org.elasticsearch.xpack.esql.core.tree.Source; -import org.elasticsearch.xpack.esql.core.type.AbstractEsFieldTypeTests; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.type.AbstractEsFieldTypeTests; public class FieldAttributeTests extends AbstractAttributeTestCase { public static FieldAttribute createFieldAttribute(int maxDepth, boolean onlyRepresentable) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java index 4ab2959b37d29..8e5c098c429db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/UnsupportedAttributeTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.xpack.esql.core.expression.NameId; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; -import org.elasticsearch.xpack.esql.core.type.UnsupportedEsFieldTests; +import org.elasticsearch.xpack.esql.type.UnsupportedEsFieldTests; public class UnsupportedAttributeTests extends AbstractAttributeTestCase { @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java index e1b56d61a211c..504cf4ec1cd12 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/index/EsIndexSerializationTests.java @@ -8,14 +8,16 @@ package org.elasticsearch.xpack.esql.index; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; -import org.elasticsearch.xpack.esql.core.type.EsFieldTests; import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.type.EsFieldTests; import java.io.IOException; import java.util.HashMap; @@ -56,7 +58,12 @@ private static Set randomConcreteIndices() { @Override protected Writeable.Reader instanceReader() { - return EsIndex::new; + return a -> new EsIndex(new PlanStreamInput(a, new PlanNameRegistry(), a.namedWriteableRegistry(), null)); + } + + @Override + protected Writeable.Writer instanceWriter() { + return (out, idx) -> new PlanStreamOutput(out, new PlanNameRegistry(), null).writeWriteable(idx); } @Override @@ -78,11 +85,6 @@ protected EsIndex mutateInstance(EsIndex instance) throws IOException { return new EsIndex(name, mapping, concreteIndices); } - @Override - protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - /** * Build an {@link EsIndex} with many conflicting fields across many indices. */ @@ -136,7 +138,12 @@ public static EsIndex indexWithManyConflicts(boolean withParent) { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. 
*/ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(976591)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(991027)); + /* + * History: + * 953.7kb - shorten error messages for UnsupportedAttributes #111973 + * 967.7kb - cache EsFields #112008 (little overhead of the cache) + */ } /** @@ -144,11 +151,12 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(1921374)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(1374498)); /* * History: * 16.9mb - start * 1.8mb - shorten error messages for UnsupportedAttributes #111973 + * 1.3mb - cache EsFields #112008 */ } @@ -170,8 +178,8 @@ public void testManyTypeConflictsWithParent() throws IOException { *

*/ private void testManyTypeConflicts(boolean withParent, ByteSizeValue expected) throws IOException { - try (BytesStreamOutput out = new BytesStreamOutput()) { - indexWithManyConflicts(withParent).writeTo(out); + try (BytesStreamOutput out = new BytesStreamOutput(); var pso = new PlanStreamOutput(out, new PlanNameRegistry(), null)) { + indexWithManyConflicts(withParent).writeTo(pso); assertThat(ByteSizeValue.ofBytes(out.bytes().length()), byteSizeEquals(expected)); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index a5f2adbc1fc29..e5f195b053349 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -269,7 +269,7 @@ static Nullability randomNullability() { }; } - static EsField randomEsField() { + public static EsField randomEsField() { return randomEsField(0); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java index d169cdb5742af..cdb6c5384e16a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java @@ -259,6 +259,42 @@ public void testWriteDifferentAttributesSameID() throws IOException { } } + public void testWriteMultipleEsFields() throws IOException { + Configuration configuration = randomConfiguration(); + try ( + BytesStreamOutput out = new BytesStreamOutput(); + PlanStreamOutput planStream = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE, configuration) + ) { + List fields = new ArrayList<>(); + int occurrences = 
randomIntBetween(2, 300); + for (int i = 0; i < occurrences; i++) { + fields.add(PlanNamedTypesTests.randomEsField()); + } + + // send all the EsFields, three times + for (int i = 0; i < 3; i++) { + for (EsField attr : fields) { + attr.writeTo(planStream); + } + } + + try (PlanStreamInput in = new PlanStreamInput(out.bytes().streamInput(), PlanNameRegistry.INSTANCE, REGISTRY, configuration)) { + List readFields = new ArrayList<>(); + for (int i = 0; i < occurrences; i++) { + readFields.add(EsField.readFrom(in)); + assertThat(readFields.get(i), equalTo(fields.get(i))); + } + // two more times + for (int i = 0; i < 2; i++) { + for (int j = 0; j < occurrences; j++) { + EsField attr = EsField.readFrom(in); + assertThat(attr, sameInstance(readFields.get(j))); + } + } + } + } + } + private static Attribute randomAttribute() { return switch (randomInt(3)) { case 0 -> PlanNamedTypesTests.randomFieldAttribute(); @@ -293,7 +329,6 @@ private Column randomColumn() { writeables.addAll(Block.getNamedWriteables()); writeables.addAll(Attribute.getNamedWriteables()); writeables.add(UnsupportedAttribute.ENTRY); - writeables.addAll(EsField.getNamedWriteables()); REGISTRY = new NamedWriteableRegistry(new ArrayList<>(new HashSet<>(writeables))); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java index 8562391b2e3b0..1b9df46a1c842 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/logical/AbstractLogicalPlanSerializationTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; 
-import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.FieldAttributeTests; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -42,7 +41,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); return new NamedWriteableRegistry(entries); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java index b7b321a022b87..7a0d125ad85ba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/AbstractPhysicalPlanSerializationTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Node; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.plan.AbstractNodeSerializationTests; @@ -46,7 +45,6 @@ protected final NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); 
entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java index 237f8d6a9c580..ae58c49eade17 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plan/physical/ExchangeSinkExecSerializationTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.esql.core.expression.NamedExpression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; -import org.elasticsearch.xpack.esql.core.type.EsField; import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.esql.index.EsIndex; import org.elasticsearch.xpack.esql.index.EsIndexSerializationTests; @@ -63,7 +62,12 @@ public static Source randomSource() { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. */ public void testManyTypeConflicts() throws IOException { - testManyTypeConflicts(false, ByteSizeValue.ofBytes(2444252)); + testManyTypeConflicts(false, ByteSizeValue.ofBytes(1897374)); + /* + * History: + * 2.3mb - shorten error messages for UnsupportedAttributes #111973 + * 1.8mb - cache EsFields #112008 + */ } /** @@ -71,12 +75,13 @@ public void testManyTypeConflicts() throws IOException { * See {@link #testManyTypeConflicts(boolean, ByteSizeValue)} for more. 
*/ public void testManyTypeConflictsWithParent() throws IOException { - testManyTypeConflicts(true, ByteSizeValue.ofBytes(5885765)); + testManyTypeConflicts(true, ByteSizeValue.ofBytes(3271487)); /* * History: * 2 gb+ - start * 43.3mb - Cache attribute subclasses #111447 * 5.6mb - shorten error messages for UnsupportedAttributes #111973 + * 3.1mb - cache EsFields #112008 */ } @@ -131,7 +136,6 @@ private NamedWriteableRegistry getNamedWriteableRegistry() { entries.addAll(AggregateFunction.getNamedWriteables()); entries.addAll(Expression.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); entries.addAll(Block.getNamedWriteables()); entries.addAll(NamedExpression.getNamedWriteables()); entries.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables()); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java similarity index 57% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java index a415c529894c3..9b2bf03b5c8aa 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/AbstractEsFieldTypeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/AbstractEsFieldTypeTests.java @@ -5,16 +5,26 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.AbstractWireTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; import java.io.IOException; +import java.util.List; import java.util.Map; import java.util.TreeMap; -public abstract class AbstractEsFieldTypeTests extends AbstractNamedWriteableTestCase { +public abstract class AbstractEsFieldTypeTests extends AbstractWireTestCase { public static EsField randomAnyEsField(int maxDepth) { return switch (between(0, 5)) { case 0 -> EsFieldTests.randomEsField(maxDepth); @@ -32,6 +42,25 @@ public static EsField randomAnyEsField(int maxDepth) { protected abstract T mutate(T instance); + @Override + protected EsField copyInstance(EsField instance, TransportVersion version) throws IOException { + NamedWriteableRegistry namedWriteableRegistry = getNamedWriteableRegistry(); + try ( + BytesStreamOutput output = new BytesStreamOutput(); + var pso = new PlanStreamOutput(output, new PlanNameRegistry(), EsqlTestUtils.TEST_CFG) + ) { + pso.setTransportVersion(version); + instance.writeTo(pso); + try ( + StreamInput in1 = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry); + var psi = new PlanStreamInput(in1, new PlanNameRegistry(), in1.namedWriteableRegistry(), EsqlTestUtils.TEST_CFG) + ) { + 
psi.setTransportVersion(version); + return EsField.readFrom(psi); + } + } + } + /** * Generate sub-properties. * @param maxDepth the maximum number of levels of properties to make @@ -59,11 +88,6 @@ protected final T mutateInstance(EsField instance) throws IOException { @Override protected final NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(EsField.getNamedWriteables()); - } - - @Override - protected final Class categoryClass() { - return EsField.class; + return new NamedWriteableRegistry(List.of()); } } diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java similarity index 99% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java index 929aa1c0eab49..9f8c8f91b7037 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DataTypeConversionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DataTypeConversionTests.java @@ -4,13 +4,16 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.expression.Literal; import org.elasticsearch.xpack.esql.core.tree.Location; import org.elasticsearch.xpack.esql.core.tree.Source; +import org.elasticsearch.xpack.esql.core.type.Converter; +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.DataTypeConverter; import org.elasticsearch.xpack.versionfield.Version; import java.math.BigDecimal; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java similarity index 89% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java index dea03ee8a8cdf..bf0494d5fd043 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/DateEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/DateEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DateEsField; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java index e72ae0c5c0cda..e824b4de03e26 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/EsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/EsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.DataType; +import org.elasticsearch.xpack.esql.core.type.EsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java index 47a99329d0222..c66088b0695d4 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/InvalidMappedFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/InvalidMappedFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.InvalidMappedField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java similarity index 92% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java index a5d3b8329b2df..ef04f0e27c096 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/KeywordEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/KeywordEsFieldTests.java @@ -5,9 +5,11 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.KeywordEsField; import java.util.Map; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java index 618ca812005f8..d4ca40b75d2f3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/MultiTypeEsFieldTests.java @@ -9,13 +9,14 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.test.AbstractWireTestCase; import org.elasticsearch.xpack.esql.core.expression.Attribute; import 
org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.FieldAttribute; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; @@ -57,7 +58,7 @@ * These differences can be minimized once Expression is fully supported in the new serialization approach, and the esql and esql.core * modules are merged, or at least the relevant classes are moved. */ -public class MultiTypeEsFieldTests extends AbstractNamedWriteableTestCase { +public class MultiTypeEsFieldTests extends AbstractWireTestCase { private Configuration config; @@ -94,26 +95,19 @@ protected MultiTypeEsField mutateInstance(MultiTypeEsField instance) throws IOEx protected final NamedWriteableRegistry getNamedWriteableRegistry() { List entries = new ArrayList<>(UnaryScalarFunction.getNamedWriteables()); entries.addAll(Attribute.getNamedWriteables()); - entries.addAll(EsField.getNamedWriteables()); - entries.add(MultiTypeEsField.ENTRY); entries.addAll(Expression.getNamedWriteables()); return new NamedWriteableRegistry(entries); } - @Override - protected final Class categoryClass() { - return MultiTypeEsField.class; - } - @Override protected final MultiTypeEsField copyInstance(MultiTypeEsField instance, TransportVersion version) throws IOException { return copyInstance( instance, getNamedWriteableRegistry(), - (out, v) -> new PlanStreamOutput(out, new PlanNameRegistry(), config).writeNamedWriteable(v), + (out, v) -> v.writeTo(new PlanStreamOutput(out, new PlanNameRegistry(), config)), in -> { PlanStreamInput pin = new PlanStreamInput(in, new 
PlanNameRegistry(), in.namedWriteableRegistry(), config); - return (MultiTypeEsField) pin.readNamedWriteable(EsField.class); + return EsField.readFrom(pin); }, version ); diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java similarity index 90% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java index 817dd7cd27094..9af3b7376f2b2 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/TextEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/TextEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.TextEsField; import java.util.Map; diff --git a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java similarity index 91% rename from x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java index e05d8ca10425e..a89ca9481b7e1 100644 --- a/x-pack/plugin/esql-core/src/test/java/org/elasticsearch/xpack/esql/core/type/UnsupportedEsFieldTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/type/UnsupportedEsFieldTests.java @@ -5,7 +5,10 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.core.type; +package org.elasticsearch.xpack.esql.type; + +import org.elasticsearch.xpack.esql.core.type.EsField; +import org.elasticsearch.xpack.esql.core.type.UnsupportedEsField; import java.util.Map; From 73c5c1e1c587cc7ec7ce1f0d10fea49ecfd39002 Mon Sep 17 00:00:00 2001 From: Chris Berkhout Date: Tue, 27 Aug 2024 11:35:53 +0200 Subject: [PATCH 11/46] ByteArrayStreamInput: Return -1 when there are no more bytes to read (#112214) --- docs/changelog/112214.yaml | 5 +++++ .../common/io/stream/ByteArrayStreamInput.java | 6 +++++- .../elasticsearch/common/io/stream/AbstractStreamTests.java | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112214.yaml diff --git a/docs/changelog/112214.yaml b/docs/changelog/112214.yaml new file mode 100644 index 0000000000000..430f95a72bb3f --- /dev/null +++ b/docs/changelog/112214.yaml @@ -0,0 +1,5 @@ +pr: 112214 +summary: '`ByteArrayStreamInput:` Return -1 when there are no more bytes to read' +area: Infra/Core +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java index 838f2998d339f..a27eec4c12061 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/ByteArrayStreamInput.java @@ -120,7 +120,11 @@ public void readBytes(byte[] b, int offset, int len) { @Override public int read(byte[] b, int off, int len) throws IOException { - int toRead = Math.min(len, available()); + final int available = limit - pos; + if (available <= 0) { + return -1; + } + int toRead = Math.min(len, available); readBytes(b, off, toRead); return toRead; } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java index 
8451d2fd64b9c..b1104a72400ea 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java @@ -723,6 +723,7 @@ public void testReadAfterReachingEndOfStream() throws IOException { input.readBytes(new byte[len], 0, len); assertEquals(-1, input.read()); + assertEquals(-1, input.read(new byte[2], 0, 2)); } } From fb32adcb174a7f32338b55737c8273fd962fefdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 27 Aug 2024 14:10:05 +0200 Subject: [PATCH 12/46] Add manage roles privilege (#110633) This PR adds functionality to limit the resources and privileges an Elasticsearch user can grant permissions to when creating a role. This is achieved using a new [global](https://www.elastic.co/guide/en/elasticsearch/reference/current/defining-roles.html) (configurable/request aware) cluster privilege , named `role`, with a sub-key called `manage/indices` which is an array where each entry is a pair of [index patterns](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf) and [index privileges](https://www.elastic.co/guide/en/elasticsearch/reference/current/security-privileges.html#privileges-list-indices). ## Definition - Using a role with this privilege to create, update or delete roles with privileges on indices outside of the indices matched by the [index pattern](https://docs.google.com/document/d/1VN73C2KpmvvOW85-XGUqMmnMwXrfK4aoxRtG8tPqk7Y/edit#heading=h.z74zwo30t0pf) in the indices array, will fail. - Using a role with this privilege to try to create, update or delete roles with cluster, run_as, etc. privileges will fail. - Using a role with this privilege with restricted indices will fail. - Other broader privileges (such as manage_security) will nullify this privilege. 
## Example Create `test-manage` role: ``` POST _security/role/test-manage { "global": { "role": { "manage": { "indices": [ { "names": ["allowed-index-prefix-*"], "privileges":["read"] } ] } } } } ``` And then a user with that role creates a role: ``` POST _security/role/a-test-role { "indices": [ { "names": [ "allowed-index-prefix-some-index" ], "privileges": [ "read" ]}] } ``` But this would fail for: ``` POST _security/role/a-test-role { "indices": [ { "names": [ "not-allowed-index-prefix-some-index" ], "privileges": [ "read" ]}] } ``` ## Backwards compatibility and mixed cluster concerns - A new mapping version has been added to the security index to store the new privilege. - If the new mapping version is not applied and a role descriptor with the new global privilege is written, the write will fail causing an exception. - When sending role descriptors over the transport layer in a mixed cluster, the new global privilege needs to be excluded for older versions. This is handled with a new transport version. - If a role descriptor is serialized for API keys on one node in a mixed cluster and read from another, an older node might not be able to deserialize it, so it needs to be removed before being written in mixed cluster with old nodes. This is handled in the API key service. - If a role descriptor containing a global privilege is in a put role request in a mixed cluster where it's not supported on all nodes, fail request to create role. - RCS is not applicable here since RCS only considers cluster privileges and index privileges (not global cluster privileges). - This doesn't include remote privileges, since the current use case with connectors doesn't need roles to be created on a cluster separate from the cluster where the search data resides. ## Follow up work - Create a docs PR - Error handling for actions that use manage roles. Should configurable cluster privileges that grant restricted usage of actions be listed in authorization error messages? 
--- docs/changelog/110633.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/core/XPackClientPlugin.java | 7 +- .../authz/permission/ClusterPermission.java | 22 ++ .../authz/permission/IndicesPermission.java | 87 ++++- .../core/security/authz/permission/Role.java | 2 +- .../ConfigurableClusterPrivilege.java | 3 +- .../ConfigurableClusterPrivileges.java | 319 +++++++++++++++- .../authz/RoleDescriptorTestHelper.java | 35 +- .../RoleDescriptorsIntersectionTests.java | 5 + .../ConfigurableClusterPrivilegesTests.java | 8 +- .../privilege/ManageRolesPrivilegesTests.java | 351 ++++++++++++++++++ .../security/ManageRolesPrivilegeIT.java | 211 +++++++++++ .../xpack/security/apikey/ApiKeyRestIT.java | 67 ++++ .../xpack/security/authc/ApiKeyService.java | 125 ++++--- .../authz/store/NativeRolesStore.java | 11 +- .../support/SecuritySystemIndices.java | 40 ++ .../audit/logfile/LoggingAuditTrailTests.java | 10 +- .../security/audit/logfile/audited_roles.txt | 4 +- .../RolesBackwardsCompatibilityIT.java | 186 ++++++++-- 20 files changed, 1397 insertions(+), 102 deletions(-) create mode 100644 docs/changelog/110633.yaml create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java create mode 100644 x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java diff --git a/docs/changelog/110633.yaml b/docs/changelog/110633.yaml new file mode 100644 index 0000000000000..d4d1dc68cdbcc --- /dev/null +++ b/docs/changelog/110633.yaml @@ -0,0 +1,5 @@ +pr: 110633 +summary: Add manage roles privilege +area: Authorization +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 33f483c57b54e..582c618216999 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ 
b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -198,6 +198,7 @@ static TransportVersion def(int id) { public static final TransportVersion ALLOW_PARTIAL_SEARCH_RESULTS_IN_PIT = def(8_728_00_0); public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); + public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a2c3e40c76ae4..2e806a24ad469 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -149,7 +149,7 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TokenMetadata.TYPE, TokenMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SECURITY, SecurityFeatureSetUsage::new), - // security : conditional privileges + // security : configurable cluster privileges new NamedWriteableRegistry.Entry( ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.ManageApplicationPrivileges.WRITEABLE_NAME, @@ -160,6 +160,11 @@ public List getNamedWriteables() { ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + 
ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom + ), // security : role-mappings new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index c70f2a05bfe93..9c41786f39eeb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -17,6 +18,7 @@ import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.function.Function; import java.util.function.Predicate; /** @@ -84,6 +86,16 @@ public static class Builder { private final List actionAutomatons = new ArrayList<>(); private final List permissionChecks = new ArrayList<>(); + private final RestrictedIndices restrictedIndices; + + public Builder(RestrictedIndices restrictedIndices) { + this.restrictedIndices = restrictedIndices; + } + + public Builder() { + this.restrictedIndices = null; + } + public Builder add( final ClusterPrivilege clusterPrivilege, final Set allowedActionPatterns, @@ -110,6 +122,16 @@ public Builder add(final ClusterPrivilege clusterPrivilege, final PermissionChec return this; } 
+ public Builder addWithPredicateSupplier( + final ClusterPrivilege clusterPrivilege, + final Set allowedActionPatterns, + final Function> requestPredicateSupplier + ) { + final Automaton actionAutomaton = createAutomaton(allowedActionPatterns, Set.of()); + Predicate requestPredicate = requestPredicateSupplier.apply(restrictedIndices); + return add(clusterPrivilege, new ActionRequestBasedPermissionCheck(clusterPrivilege, actionAutomaton, requestPredicate)); + } + public ClusterPermission build() { if (clusterPrivileges.isEmpty()) { return NONE; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index d29b1dd67757a..e1b72cc43b38e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl; @@ -86,6 +87,7 @@ public Builder addGroup( public IndicesPermission build() { return new IndicesPermission(restrictedIndices, groups.toArray(Group.EMPTY_ARRAY)); } + } private IndicesPermission(RestrictedIndices restrictedIndices, Group[] groups) { @@ -238,6 +240,21 @@ public boolean check(String action) { return false; } + public boolean checkResourcePrivileges( + Set checkForIndexPatterns, + boolean allowRestrictedIndices, + Set checkForPrivileges, + @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder + ) { + return 
checkResourcePrivileges( + checkForIndexPatterns, + allowRestrictedIndices, + checkForPrivileges, + false, + resourcePrivilegesMapBuilder + ); + } + /** * For given index patterns and index privileges determines allowed privileges and creates an instance of {@link ResourcePrivilegesMap} * holding a map of resource to {@link ResourcePrivileges} where resource is index pattern and the map of index privilege to whether it @@ -246,6 +263,7 @@ public boolean check(String action) { * @param checkForIndexPatterns check permission grants for the set of index patterns * @param allowRestrictedIndices if {@code true} then checks permission grants even for restricted indices by index matching * @param checkForPrivileges check permission grants for the set of index privileges + * @param combineIndexGroups combine index groups to enable checking against regular expressions * @param resourcePrivilegesMapBuilder out-parameter for returning the details on which privilege over which resource is granted or not. * Can be {@code null} when no such details are needed so the method can return early, after * encountering the first privilege that is not granted over some resource. 
@@ -255,10 +273,13 @@ public boolean checkResourcePrivileges( Set checkForIndexPatterns, boolean allowRestrictedIndices, Set checkForPrivileges, + boolean combineIndexGroups, @Nullable ResourcePrivilegesMap.Builder resourcePrivilegesMapBuilder ) { - final Map predicateCache = new HashMap<>(); boolean allMatch = true; + Map indexGroupAutomatons = indexGroupAutomatons( + combineIndexGroups && checkForIndexPatterns.stream().anyMatch(Automatons::isLuceneRegex) + ); for (String forIndexPattern : checkForIndexPatterns) { Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) { @@ -266,15 +287,14 @@ public boolean checkResourcePrivileges( } if (false == Operations.isEmpty(checkIndexAutomaton)) { Automaton allowedIndexPrivilegesAutomaton = null; - for (Group group : groups) { - final Automaton groupIndexAutomaton = predicateCache.computeIfAbsent(group, Group::getIndexMatcherAutomaton); - if (Operations.subsetOf(checkIndexAutomaton, groupIndexAutomaton)) { + for (var indexAndPrivilegeAutomaton : indexGroupAutomatons.entrySet()) { + if (Operations.subsetOf(checkIndexAutomaton, indexAndPrivilegeAutomaton.getValue())) { if (allowedIndexPrivilegesAutomaton != null) { allowedIndexPrivilegesAutomaton = Automatons.unionAndMinimize( - Arrays.asList(allowedIndexPrivilegesAutomaton, group.privilege().getAutomaton()) + Arrays.asList(allowedIndexPrivilegesAutomaton, indexAndPrivilegeAutomaton.getKey()) ); } else { - allowedIndexPrivilegesAutomaton = group.privilege().getAutomaton(); + allowedIndexPrivilegesAutomaton = indexAndPrivilegeAutomaton.getKey(); } } } @@ -656,6 +676,61 @@ private static boolean containsPrivilegeThatGrantsMappingUpdatesForBwc(Group gro return group.privilege().name().stream().anyMatch(PRIVILEGE_NAME_SET_BWC_ALLOW_MAPPING_UPDATE::contains); } + /** + * Get all automatons for the index groups in this permission and optionally combine the index groups to 
enable checking if a set of + * index patterns specified using a regular expression grants a set of index privileges. + * + *

An index group is defined as a set of index patterns and a set of privileges (excluding field permissions and DLS queries). + * {@link IndicesPermission} consist of a set of index groups. For non-regular expression privilege checks, an index pattern is checked + * against each index group, to see if it's a sub-pattern of the index pattern for the group and then if that group grants some or all + * of the privileges requested. For regular expressions it's not sufficient to check per group since the index patterns covered by a + * group can be distinct sets and a regular expression can cover several distinct sets. + * + *

For example the two index groups: {"names": ["a"], "privileges": ["read", "create"]} and {"names": ["b"], + * "privileges": ["read","delete"]} will not match on ["\[ab]\"], while a single index group: + * {"names": ["a", "b"], "privileges": ["read"]} will. This happens because the index groups are evaluated against a request index + * pattern without first being combined. In the example above, the two index patterns should be combined to: + * {"names": ["a", "b"], "privileges": ["read"]} before being checked. + * + * + * @param combine combine index groups to allow for checking against regular expressions + * + * @return a map of all index and privilege pattern automatons + */ + private Map indexGroupAutomatons(boolean combine) { + // Map of privilege automaton object references (cached by IndexPrivilege::CACHE) + Map allAutomatons = new HashMap<>(); + for (Group group : groups) { + Automaton indexAutomaton = group.getIndexMatcherAutomaton(); + allAutomatons.compute( + group.privilege().getAutomaton(), + (key, value) -> value == null ? indexAutomaton : Automatons.unionAndMinimize(List.of(value, indexAutomaton)) + ); + if (combine) { + List> combinedAutomatons = new ArrayList<>(); + for (var indexAndPrivilegeAutomatons : allAutomatons.entrySet()) { + Automaton intersectingPrivileges = Operations.intersection( + indexAndPrivilegeAutomatons.getKey(), + group.privilege().getAutomaton() + ); + if (Operations.isEmpty(intersectingPrivileges) == false) { + Automaton indexPatternAutomaton = Automatons.unionAndMinimize( + List.of(indexAndPrivilegeAutomatons.getValue(), indexAutomaton) + ); + combinedAutomatons.add(new Tuple<>(intersectingPrivileges, indexPatternAutomaton)); + } + } + combinedAutomatons.forEach( + automatons -> allAutomatons.compute( + automatons.v1(), + (key, value) -> value == null ? 
automatons.v2() : Automatons.unionAndMinimize(List.of(value, automatons.v2())) + ) + ); + } + } + return allAutomatons; + } + public static class Group { public static final Group[] EMPTY_ARRAY = new Group[0]; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java index 0fc04e8cc9a52..d8d56a4fbb247 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/Role.java @@ -233,7 +233,7 @@ private Builder(RestrictedIndices restrictedIndices, String[] names) { } public Builder cluster(Set privilegeNames, Iterable configurableClusterPrivileges) { - ClusterPermission.Builder builder = ClusterPermission.builder(); + ClusterPermission.Builder builder = new ClusterPermission.Builder(restrictedIndices); if (privilegeNames.isEmpty() == false) { for (String name : privilegeNames) { builder = ClusterPrivilegeResolver.resolve(name).buildPermission(builder); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java index f9722ca42f20d..edb0cb8f9e79d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilege.java @@ -41,7 +41,8 @@ public interface ConfigurableClusterPrivilege extends NamedWriteable, ToXContent */ enum Category { APPLICATION(new ParseField("application")), - PROFILE(new ParseField("profile")); + PROFILE(new ParseField("profile")), + ROLE(new ParseField("role")); public final 
ParseField field; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java index fed8b7e0d7a1c..b93aa079a28d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.core.security.authz.privilege; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -17,10 +20,21 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.privilege.ApplicationPrivilegesRequest; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataAction; import org.elasticsearch.xpack.core.security.action.profile.UpdateProfileDataRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleAction; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import 
org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; +import org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege.Category; import org.elasticsearch.xpack.core.security.support.StringMatcher; import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; @@ -30,12 +44,18 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.TreeMap; +import java.util.function.Function; import java.util.function.Predicate; +import static org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege.DELETE_INDEX; + /** * Static utility class for working with {@link ConfigurableClusterPrivilege} instances */ @@ -43,6 +63,7 @@ public final class ConfigurableClusterPrivileges { public static final ConfigurableClusterPrivilege[] EMPTY_ARRAY = new ConfigurableClusterPrivilege[0]; + private static final Logger logger = LogManager.getLogger(ConfigurableClusterPrivileges.class); public static final Writeable.Reader READER = in1 -> in1.readNamedWriteable( ConfigurableClusterPrivilege.class ); @@ -61,7 +82,16 @@ public static ConfigurableClusterPrivilege[] readArray(StreamInput in) throws IO * Utility method to write an array of {@link ConfigurableClusterPrivilege} objects to a {@link StreamOutput} */ public static void writeArray(StreamOutput out, ConfigurableClusterPrivilege[] privileges) throws IOException { - out.writeArray(WRITER, privileges); + if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + out.writeArray(WRITER, privileges); + } else { + out.writeArray( + 
WRITER, + Arrays.stream(privileges) + .filter(privilege -> privilege instanceof ManageRolesPrivilege == false) + .toArray(ConfigurableClusterPrivilege[]::new) + ); + } } /** @@ -97,7 +127,7 @@ public static List parse(XContentParser parser) th while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); - expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field); + expectFieldName(parser, Category.APPLICATION.field, Category.PROFILE.field, Category.ROLE.field); if (Category.APPLICATION.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { @@ -106,8 +136,7 @@ public static List parse(XContentParser parser) th expectFieldName(parser, ManageApplicationPrivileges.Fields.MANAGE); privileges.add(ManageApplicationPrivileges.parse(parser)); } - } else { - assert Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler()); + } else if (Category.PROFILE.field.match(parser.currentName(), parser.getDeprecationHandler())) { expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); @@ -115,9 +144,16 @@ public static List parse(XContentParser parser) th expectFieldName(parser, WriteProfileDataPrivileges.Fields.WRITE); privileges.add(WriteProfileDataPrivileges.parse(parser)); } + } else if (Category.ROLE.field.match(parser.currentName(), parser.getDeprecationHandler())) { + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + + expectFieldName(parser, 
ManageRolesPrivilege.Fields.MANAGE); + privileges.add(ManageRolesPrivilege.parse(parser)); + } } } - return privileges; } @@ -362,4 +398,277 @@ private interface Fields { ParseField APPLICATIONS = new ParseField("applications"); } } + + public static class ManageRolesPrivilege implements ConfigurableClusterPrivilege { + public static final String WRITEABLE_NAME = "manage-roles-privilege"; + private final List indexPermissionGroups; + private final Function> requestPredicateSupplier; + + private static final Set EXPECTED_INDEX_GROUP_FIELDS = Set.of( + Fields.NAMES.getPreferredName(), + Fields.PRIVILEGES.getPreferredName() + ); + + public ManageRolesPrivilege(List manageRolesIndexPermissionGroups) { + this.indexPermissionGroups = manageRolesIndexPermissionGroups; + this.requestPredicateSupplier = (restrictedIndices) -> { + IndicesPermission.Builder indicesPermissionBuilder = new IndicesPermission.Builder(restrictedIndices); + for (ManageRolesIndexPermissionGroup indexPatternPrivilege : manageRolesIndexPermissionGroups) { + indicesPermissionBuilder.addGroup( + IndexPrivilege.get(Set.of(indexPatternPrivilege.privileges())), + FieldPermissions.DEFAULT, + null, + false, + indexPatternPrivilege.indexPatterns() + ); + } + final IndicesPermission indicesPermission = indicesPermissionBuilder.build(); + + return (TransportRequest request) -> { + if (request instanceof final PutRoleRequest putRoleRequest) { + return hasNonIndexPrivileges(putRoleRequest.roleDescriptor()) == false + && Arrays.stream(putRoleRequest.indices()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ); + } else if (request instanceof final BulkPutRolesRequest bulkPutRoleRequest) { + return bulkPutRoleRequest.getRoles().stream().noneMatch(ManageRolesPrivilege::hasNonIndexPrivileges) + && bulkPutRoleRequest.getRoles() + .stream() + .allMatch( + roleDescriptor -> 
Arrays.stream(roleDescriptor.getIndicesPrivileges()) + .noneMatch( + indexPrivilege -> requestIndexPatternsAllowed( + indicesPermission, + indexPrivilege.getIndices(), + indexPrivilege.getPrivileges() + ) == false + ) + ); + } else if (request instanceof final DeleteRoleRequest deleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + new String[] { deleteRoleRequest.name() }, + DELETE_INDEX.name().toArray(String[]::new) + ); + } else if (request instanceof final BulkDeleteRolesRequest bulkDeleteRoleRequest) { + return requestIndexPatternsAllowed( + indicesPermission, + bulkDeleteRoleRequest.getRoleNames().toArray(String[]::new), + DELETE_INDEX.name().toArray(String[]::new) + ); + } + throw new IllegalArgumentException("Unsupported request type [" + request.getClass() + "]"); + }; + }; + } + + @Override + public Category getCategory() { + return Category.ROLE; + } + + @Override + public String getWriteableName() { + return WRITEABLE_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(indexPermissionGroups); + } + + public static ManageRolesPrivilege createFrom(StreamInput in) throws IOException { + final List indexPatternPrivileges = in.readCollectionAsList( + ManageRolesIndexPermissionGroup::createFrom + ); + return new ManageRolesPrivilege(indexPatternPrivileges); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field( + Fields.MANAGE.getPreferredName(), + Map.of(Fields.INDICES.getPreferredName(), indexPermissionGroups.stream().map(indexPatternPrivilege -> { + Map sortedMap = new TreeMap<>(); + sortedMap.put(Fields.NAMES.getPreferredName(), indexPatternPrivilege.indexPatterns()); + sortedMap.put(Fields.PRIVILEGES.getPreferredName(), indexPatternPrivilege.privileges()); + return sortedMap; + }).toList()) + ); + } + + private static void expectedIndexGroupFields(String fieldName, XContentParser parser) { + 
if (EXPECTED_INDEX_GROUP_FIELDS.contains(fieldName) == false) { + throw new XContentParseException( + parser.getTokenLocation(), + "failed to parse privilege. expected one of " + + Arrays.toString(EXPECTED_INDEX_GROUP_FIELDS.toArray(String[]::new)) + + " but found [" + + fieldName + + "] instead" + ); + } + } + + public static ManageRolesPrivilege parse(XContentParser parser) throws IOException { + expectedToken(parser.currentToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.MANAGE); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + expectFieldName(parser, Fields.INDICES); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + List indexPrivileges = new ArrayList<>(); + Map parsedArraysByFieldName = new HashMap<>(); + + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + expectedToken(token, parser, XContentParser.Token.START_OBJECT); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + String currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.FIELD_NAME); + currentFieldName = parser.currentName(); + expectedIndexGroupFields(currentFieldName, parser); + expectedToken(parser.nextToken(), parser, XContentParser.Token.START_ARRAY); + parsedArraysByFieldName.put(currentFieldName, XContentUtils.readStringArray(parser, false)); + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + indexPrivileges.add( + new ManageRolesIndexPermissionGroup( + parsedArraysByFieldName.get(Fields.NAMES.getPreferredName()), + 
parsedArraysByFieldName.get(Fields.PRIVILEGES.getPreferredName()) + ) + ); + } + expectedToken(parser.nextToken(), parser, XContentParser.Token.END_OBJECT); + + for (var indexPrivilege : indexPrivileges) { + if (indexPrivilege.indexPatterns == null || indexPrivilege.indexPatterns.length == 0) { + throw new IllegalArgumentException("Indices privileges must refer to at least one index name or index name pattern"); + } + if (indexPrivilege.privileges == null || indexPrivilege.privileges.length == 0) { + throw new IllegalArgumentException("Indices privileges must define at least one privilege"); + } + } + return new ManageRolesPrivilege(indexPrivileges); + } + + public record ManageRolesIndexPermissionGroup(String[] indexPatterns, String[] privileges) implements Writeable { + public static ManageRolesIndexPermissionGroup createFrom(StreamInput in) throws IOException { + return new ManageRolesIndexPermissionGroup(in.readStringArray(), in.readStringArray()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indexPatterns); + out.writeStringArray(privileges); + } + + @Override + public String toString() { + return "{" + + Fields.NAMES + + ":" + + Arrays.toString(indexPatterns()) + + ":" + + Fields.PRIVILEGES + + ":" + + Arrays.toString(privileges()) + + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ManageRolesIndexPermissionGroup that = (ManageRolesIndexPermissionGroup) o; + return Arrays.equals(indexPatterns, that.indexPatterns) && Arrays.equals(privileges, that.privileges); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indexPatterns), Arrays.hashCode(privileges)); + } + } + + @Override + public String toString() { + return "{" + + getCategory() + + ":" + + Fields.MANAGE.getPreferredName() + + ":" + + Fields.INDICES.getPreferredName() + + "=[" + + 
Strings.collectionToDelimitedString(indexPermissionGroups, ",") + + "]}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ManageRolesPrivilege that = (ManageRolesPrivilege) o; + + if (this.indexPermissionGroups.size() != that.indexPermissionGroups.size()) { + return false; + } + + for (int i = 0; i < this.indexPermissionGroups.size(); i++) { + if (Objects.equals(this.indexPermissionGroups.get(i), that.indexPermissionGroups.get(i)) == false) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return Objects.hash(indexPermissionGroups.hashCode()); + } + + @Override + public ClusterPermission.Builder buildPermission(final ClusterPermission.Builder builder) { + return builder.addWithPredicateSupplier( + this, + Set.of(PutRoleAction.NAME, ActionTypes.BULK_PUT_ROLES.name(), ActionTypes.BULK_DELETE_ROLES.name(), DeleteRoleAction.NAME), + requestPredicateSupplier + ); + } + + private static boolean requestIndexPatternsAllowed( + IndicesPermission indicesPermission, + String[] requestIndexPatterns, + String[] privileges + ) { + return indicesPermission.checkResourcePrivileges(Set.of(requestIndexPatterns), false, Set.of(privileges), true, null); + } + + private static boolean hasNonIndexPrivileges(RoleDescriptor roleDescriptor) { + return roleDescriptor.hasApplicationPrivileges() + || roleDescriptor.hasClusterPrivileges() + || roleDescriptor.hasConfigurableClusterPrivileges() + || roleDescriptor.hasRemoteIndicesPrivileges() + || roleDescriptor.hasRemoteClusterPermissions() + || roleDescriptor.hasRunAs() + || roleDescriptor.hasWorkflowsRestriction(); + } + + private interface Fields { + ParseField MANAGE = new ParseField("manage"); + ParseField INDICES = new ParseField("indices"); + ParseField PRIVILEGES = new ParseField("privileges"); + ParseField NAMES = new ParseField("names"); + } + } } diff --git 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java index 2d8b62335f4ef..77a37cec45b25 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTestHelper.java @@ -26,6 +26,7 @@ import static org.elasticsearch.test.ESTestCase.generateRandomStringArray; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween; +import static org.elasticsearch.test.ESTestCase.randomArray; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomInt; import static org.elasticsearch.test.ESTestCase.randomIntBetween; @@ -52,6 +53,7 @@ public static RoleDescriptor randomRoleDescriptor() { .allowRestriction(randomBoolean()) .allowDescription(randomBoolean()) .allowRemoteClusters(randomBoolean()) + .allowConfigurableClusterPrivileges(randomBoolean()) .build(); } @@ -69,7 +71,7 @@ public static Map randomRoleDescriptorMetadata(boolean allowRese } public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { - final ConfigurableClusterPrivilege[] configurableClusterPrivileges = switch (randomIntBetween(0, 4)) { + return switch (randomIntBetween(0, 5)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageApplicationPrivileges( @@ -93,9 +95,9 @@ public static ConfigurableClusterPrivilege[] randomClusterPrivileges() { new ConfigurableClusterPrivileges.WriteProfileDataPrivileges( Sets.newHashSet(generateRandomStringArray(3, randomIntBetween(4, 12), false, false)) ) }; + case 5 -> randomManageRolesPrivileges(); default -> throw new IllegalStateException("Unexpected value"); }; - return 
configurableClusterPrivileges; } public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPrivileges() { @@ -119,6 +121,27 @@ public static RoleDescriptor.ApplicationResourcePrivileges[] randomApplicationPr return applicationPrivileges; } + public static ConfigurableClusterPrivilege[] randomManageRolesPrivileges() { + List indexPatternPrivileges = randomList( + 1, + 10, + () -> { + String[] indexPatterns = randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(5, 100)); + + int startIndex = randomIntBetween(0, IndexPrivilege.names().size() - 2); + int endIndex = randomIntBetween(startIndex + 1, IndexPrivilege.names().size()); + + String[] indexPrivileges = IndexPrivilege.names().stream().toList().subList(startIndex, endIndex).toArray(String[]::new); + return new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns, + indexPrivileges + ); + } + ); + + return new ConfigurableClusterPrivilege[] { new ConfigurableClusterPrivileges.ManageRolesPrivilege(indexPatternPrivileges) }; + } + public static RoleDescriptor.RemoteIndicesPrivileges[] randomRemoteIndicesPrivileges(int min, int max) { return randomRemoteIndicesPrivileges(min, max, Set.of()); } @@ -251,6 +274,7 @@ public static class Builder { private boolean allowRestriction = false; private boolean allowDescription = false; private boolean allowRemoteClusters = false; + private boolean allowConfigurableClusterPrivileges = false; public Builder() {} @@ -259,6 +283,11 @@ public Builder allowReservedMetadata(boolean allowReservedMetadata) { return this; } + public Builder allowConfigurableClusterPrivileges(boolean allowConfigurableClusterPrivileges) { + this.allowConfigurableClusterPrivileges = allowConfigurableClusterPrivileges; + return this; + } + public Builder alwaysIncludeRemoteIndices() { this.alwaysIncludeRemoteIndices = true; return this; @@ -302,7 +331,7 @@ public RoleDescriptor build() { 
randomSubsetOf(ClusterPrivilegeResolver.names()).toArray(String[]::new), randomIndicesPrivileges(0, 3), randomApplicationPrivileges(), - randomClusterPrivileges(), + allowConfigurableClusterPrivileges ? randomClusterPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(allowReservedMetadata), Map.of(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java index a892e8b864e6e..b67292e76961f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersectionTests.java @@ -48,6 +48,11 @@ public void testSerialization() throws IOException { ConfigurableClusterPrivilege.class, ConfigurableClusterPrivileges.WriteProfileDataPrivileges.WRITEABLE_NAME, ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom + ), + new NamedWriteableRegistry.Entry( + ConfigurableClusterPrivilege.class, + ConfigurableClusterPrivileges.ManageRolesPrivilege.WRITEABLE_NAME, + ConfigurableClusterPrivileges.ManageRolesPrivilege::createFrom ) ) ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java index c6fac77ea26e6..5599b33fbcfe7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivilegesTests.java @@ -61,13 +61,15 @@ public void 
testGenerateAndParseXContent() throws Exception { } private ConfigurableClusterPrivilege[] buildSecurityPrivileges() { - return switch (randomIntBetween(0, 3)) { + return switch (randomIntBetween(0, 4)) { case 0 -> new ConfigurableClusterPrivilege[0]; case 1 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges() }; case 2 -> new ConfigurableClusterPrivilege[] { WriteProfileDataPrivilegesTests.buildPrivileges() }; - case 3 -> new ConfigurableClusterPrivilege[] { + case 3 -> new ConfigurableClusterPrivilege[] { ManageRolesPrivilegesTests.buildPrivileges() }; + case 4 -> new ConfigurableClusterPrivilege[] { ManageApplicationPrivilegesTests.buildPrivileges(), - WriteProfileDataPrivilegesTests.buildPrivileges() }; + WriteProfileDataPrivilegesTests.buildPrivileges(), + ManageRolesPrivilegesTests.buildPrivileges() }; default -> throw new IllegalStateException("Unexpected value"); }; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java new file mode 100644 index 0000000000000..2d47752063d9d --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ManageRolesPrivilegesTests.java @@ -0,0 +1,351 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.authz.privilege; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.action.role.BulkDeleteRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.BulkPutRolesRequest; +import org.elasticsearch.xpack.core.security.action.role.DeleteRoleRequest; +import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; +import org.elasticsearch.xpack.core.security.authc.Authentication; +import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; +import org.elasticsearch.xpack.core.security.authz.RestrictedIndices; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; +import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges.ManageRolesPrivilege; +import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; +import org.elasticsearch.xpack.core.security.test.TestRestrictedIndices; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; +import static 
org.hamcrest.core.IsEqual.equalTo; + +public class ManageRolesPrivilegesTests extends AbstractNamedWriteableTestCase { + + private static final int MIN_INDEX_NAME_LENGTH = 4; + + public void testSimplePutRoleRequest() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedIndexPatterns(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedIndexPatterns( + permission, + new String[] { "allowed-" + randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + + public void testDeleteRoleRequest() { + new ReservedRolesStore(); + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "manage" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), true); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "not-allowed-" + randomAlphaOfLength(5)), false); + assertAllowedDeleteIndex( + permission, + new String[] { "allowed-" + randomAlphaOfLength(5), "not-allowed-" + randomAlphaOfLength(5) }, + false + ); + } + { + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + 
List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "read" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + assertAllowedDeleteIndex(permission, randomArray(1, 10, String[]::new, () -> "allowed-" + randomAlphaOfLength(5)), false); + } + } + + public void testSeveralIndexGroupsPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "read" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "d" }, new String[] { "read" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[cd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[acd]/" }, new String[] { "read" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ef]/" }, new String[] { "read" }, false); + } + + public void testPrivilegeIntersectionPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "a", "b" }, new String[] { "all" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "c" }, new String[] { "create" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new 
String[] { "d" }, new String[] { "delete" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "e" }, new String[] { "create_doc" }), + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "f" }, new String[] { "read", "manage" }) + ) + ); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "all" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abc]/" }, new String[] { "all" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ab]/" }, new String[] { "read", "manage" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ac]/" }, new String[] { "create", "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[abcde]/" }, new String[] { "create_doc" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[ce]/" }, new String[] { "create_doc" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[eb]/" }, new String[] { "create_doc" }, true); + + assertAllowedIndexPatterns(permission, new String[] { "/[d]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[ad]/" }, new String[] { "delete" }, true); + assertAllowedIndexPatterns(permission, new String[] { "/[de]/" }, new String[] { "delete" }, false); + + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + 
assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "write" }, false); + assertAllowedIndexPatterns(permission, new String[] { "/[f]/" }, new String[] { "read", "manage" }, true); + } + + public void testEmptyPrivileges() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege(List.of()); + + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "test" }, new String[] { "all" }, false); + } + + public void testRestrictedIndexPutRoleRequest() { + new ReservedRolesStore(); + + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + assertAllowedIndexPatterns(permission, new String[] { "security" }, true); + assertAllowedIndexPatterns(permission, new String[] { ".security" }, false); + assertAllowedIndexPatterns(permission, new String[] { "security", ".security-7" }, false); + } + + public void testGenerateAndParseXContent() throws Exception { + final XContent xContent = randomFrom(XContentType.values()).xContent(); + try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { + final XContentBuilder builder = new XContentBuilder(xContent, out); + + final ManageRolesPrivilege original = buildPrivileges(); + builder.startObject(); + original.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + builder.flush(); + + final byte[] bytes = out.toByteArray(); + try (XContentParser parser = xContent.createParser(XContentParserConfiguration.EMPTY, bytes)) { + 
assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + final ManageRolesPrivilege clone = ManageRolesPrivilege.parse(parser); + assertThat(parser.nextToken(), equalTo(XContentParser.Token.END_OBJECT)); + + assertThat(clone, equalTo(original)); + assertThat(original, equalTo(clone)); + } + } + } + + public void testPutRoleRequestContainsNonIndexPrivileges() { + new ReservedRolesStore(); + final ManageRolesPrivilege privilege = new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "allowed*" }, new String[] { "all" })) + ); + final ClusterPermission permission = privilege.buildPermission( + new ClusterPermission.Builder(new RestrictedIndices(TestRestrictedIndices.RESTRICTED_INDICES.getAutomaton())) + ).build(); + + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + + switch (randomIntBetween(0, 5)) { + case 0: + putRoleRequest.cluster("all"); + break; + case 1: + putRoleRequest.runAs("test"); + break; + case 2: + putRoleRequest.addApplicationPrivileges( + RoleDescriptor.ApplicationResourcePrivileges.builder() + .privileges("all") + .application("test-app") + .resources("test-resource") + .build() + ); + break; + case 3: + putRoleRequest.addRemoteIndex( + new RoleDescriptor.RemoteIndicesPrivileges.Builder("test-cluster").privileges("all").indices("test*").build() + ); + break; + case 4: + putRoleRequest.putRemoteCluster( + new RemoteClusterPermissions().addGroup( + new RemoteClusterPermissionGroup(new String[] { "monitor_enrich" }, new String[] { "test" }) + ) + ); + break; + case 5: + putRoleRequest.conditionalCluster( + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(new String[] { "test-*" }, new String[] { "read" }) + ) + ) + ); + break; + } + + putRoleRequest.name(randomAlphaOfLength(4)); + 
assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(false)); + } + + private static boolean permissionCheck(ClusterPermission permission, String action, ActionRequest request) { + final Authentication authentication = AuthenticationTestHelper.builder().build(); + assertThat(request.validate(), nullValue()); + return permission.check(action, request, authentication); + } + + private static void assertAllowedIndexPatterns(ClusterPermission permission, String[] indexPatterns, boolean expected) { + assertAllowedIndexPatterns(permission, indexPatterns, new String[] { "index", "write", "indices:data/read" }, expected); + } + + private static void assertAllowedIndexPatterns( + ClusterPermission permission, + String[] indexPatterns, + String[] privileges, + boolean expected + ) { + { + final PutRoleRequest putRoleRequest = new PutRoleRequest(); + putRoleRequest.name(randomAlphaOfLength(3)); + putRoleRequest.addIndex(indexPatterns, privileges, null, null, null, false); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/put", putRoleRequest), is(expected)); + } + { + final BulkPutRolesRequest bulkPutRolesRequest = new BulkPutRolesRequest( + List.of( + new RoleDescriptor( + randomAlphaOfLength(3), + new String[] {}, + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices(indexPatterns).privileges(privileges).build() }, + new String[] {} + ) + ) + ); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_put", bulkPutRolesRequest), is(expected)); + } + } + + private static void assertAllowedDeleteIndex(ClusterPermission permission, String[] indices, boolean expected) { + { + final BulkDeleteRolesRequest bulkDeleteRolesRequest = new BulkDeleteRolesRequest(List.of(indices)); + assertThat(permissionCheck(permission, "cluster:admin/xpack/security/role/bulk_delete", bulkDeleteRolesRequest), is(expected)); + } + { + 
assertThat(Arrays.stream(indices).allMatch(pattern -> { + final DeleteRoleRequest deleteRolesRequest = new DeleteRoleRequest(); + deleteRolesRequest.name(pattern); + return permissionCheck(permission, "cluster:admin/xpack/security/role/delete", deleteRolesRequest); + }), is(expected)); + } + } + + public static ManageRolesPrivilege buildPrivileges() { + return buildPrivileges(randomIntBetween(MIN_INDEX_NAME_LENGTH, 7)); + } + + private static ManageRolesPrivilege buildPrivileges(int indexNameLength) { + String[] indexNames = Objects.requireNonNull(generateRandomStringArray(5, indexNameLength, false, false)); + + return new ManageRolesPrivilege( + List.of(new ManageRolesPrivilege.ManageRolesIndexPermissionGroup(indexNames, IndexPrivilege.READ.name().toArray(String[]::new))) + ); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + try (var xClientPlugin = new XPackClientPlugin()) { + return new NamedWriteableRegistry(xClientPlugin.getNamedWriteables()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected Class categoryClass() { + return ConfigurableClusterPrivilege.class; + } + + @Override + protected ConfigurableClusterPrivilege createTestInstance() { + return buildPrivileges(); + } + + @Override + protected ConfigurableClusterPrivilege mutateInstance(ConfigurableClusterPrivilege instance) throws IOException { + if (instance instanceof ManageRolesPrivilege) { + return buildPrivileges(MIN_INDEX_NAME_LENGTH - 1); + } + fail(); + return null; + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java new file mode 100644 index 0000000000000..728f068adcae4 --- /dev/null +++ 
b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/ManageRolesPrivilegeIT.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.security; + +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; +import org.elasticsearch.xpack.core.security.user.User; +import org.junit.Before; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.core.StringContains.containsString; + +public class ManageRolesPrivilegeIT extends SecurityInBasicRestTestCase { + + private TestSecurityClient adminSecurityClient; + private static final SecureString TEST_PASSWORD = new SecureString("100%-secure-password".toCharArray()); + + @Before + public void setupClient() { + adminSecurityClient = new TestSecurityClient(adminClient()); + } + + public void testManageRoles() throws Exception { + createManageRolesRole("manage-roles-role", new String[0], Set.of("*-allowed-suffix"), Set.of("read", "write")); + createUser("test-user", Set.of("manage-roles-role")); + + String authHeader = basicAuthHeaderValue("test-user", TEST_PASSWORD); + + createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + 
RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges(Set.of("read", "write")).build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-suffix-not-allowed").privileges("write").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + + { + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeader, + new RoleDescriptor( + "manage-roles-role", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("test-allowed-suffix").privileges("manage").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + assertThat( + responseException.getMessage(), + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + } + } + + public void testManageSecurityNullifiesManageRoles() throws Exception { + createManageRolesRole("manage-roles-no-manage-security", new String[0], Set.of("allowed")); + createManageRolesRole("manage-roles-manage-security", new String[] { "manage_security" }, Set.of("allowed")); + + createUser("test-user-no-manage-security", Set.of("manage-roles-no-manage-security")); + createUser("test-user-manage-security", Set.of("manage-roles-manage-security")); + + String 
authHeaderNoManageSecurity = basicAuthHeaderValue("test-user-no-manage-security", TEST_PASSWORD); + String authHeaderManageSecurity = basicAuthHeaderValue("test-user-manage-security", TEST_PASSWORD); + + createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + + ResponseException responseException = assertThrows( + ResponseException.class, + () -> createRole( + authHeaderNoManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ) + ); + + assertThat( + responseException.getMessage(), + // TODO Should the new global role/manage privilege be listed here? 
Probably not because it's not documented + containsString("this action is granted by the cluster privileges [manage_security,all]") + ); + + createRole( + authHeaderManageSecurity, + new RoleDescriptor( + "test-role-not-allowed-by-manage-roles", + new String[0], + new RoleDescriptor.IndicesPrivileges[] { + RoleDescriptor.IndicesPrivileges.builder().indices("not-allowed").privileges("read").build() }, + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[0], + new String[0], + Map.of(), + Map.of() + ) + ); + } + + private void createRole(String authHeader, RoleDescriptor descriptor) throws IOException { + TestSecurityClient userAuthSecurityClient = new TestSecurityClient( + adminClient(), + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", authHeader).build() + ); + userAuthSecurityClient.putRole(descriptor); + } + + private void createUser(String username, Set roles) throws IOException { + adminSecurityClient.putUser(new User(username, roles.toArray(String[]::new)), TEST_PASSWORD); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns) throws IOException { + createManageRolesRole(roleName, clusterPrivileges, indexPatterns, Set.of("read")); + } + + private void createManageRolesRole(String roleName, String[] clusterPrivileges, Set indexPatterns, Set privileges) + throws IOException { + adminSecurityClient.putRole( + new RoleDescriptor( + roleName, + clusterPrivileges, + new RoleDescriptor.IndicesPrivileges[0], + new RoleDescriptor.ApplicationResourcePrivileges[0], + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + indexPatterns.toArray(String[]::new), + privileges.toArray(String[]::new) + ) + ) + ) }, + new String[0], + Map.of(), + Map.of() + ) + ); + } +} diff --git 
a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java index 5ae84517202d4..667140b849951 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/apikey/ApiKeyRestIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import org.junit.After; import org.junit.Before; @@ -385,6 +387,50 @@ public void testGrantApiKeyWithOnlyManageOwnApiKeyPrivilegeFails() throws IOExce assertThat(e.getMessage(), containsString("action [" + GrantApiKeyAction.NAME + "] is unauthorized for user")); } + public void testApiKeyWithManageRoles() throws IOException { + RoleDescriptor role = roleWithManageRoles("manage-roles-role", new String[] { "manage_own_api_key" }, "allowed-prefix*"); + getSecurityClient().putRole(role); + createUser("test-user", END_USER_PASSWORD, List.of("manage-roles-role")); + + final Request createApiKeyrequest = new Request("POST", "_security/api_key"); + createApiKeyrequest.setOptions( + RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", basicAuthHeaderValue("test-user", END_USER_PASSWORD)) + ); + final Map requestBody = Map.of( + "name", + "test-api-key", + "role_descriptors", + Map.of( + "test-role", + 
XContentTestUtils.convertToMap(roleWithManageRoles("test-role", new String[0], "allowed-prefix*")), + "another-test-role", + // This is not allowed by the limited-by-role (creator of the api key), so should not grant access to not-allowed=prefix* + XContentTestUtils.convertToMap(roleWithManageRoles("another-test-role", new String[0], "not-allowed-prefix*")) + ) + ); + + createApiKeyrequest.setJsonEntity(XContentTestUtils.convertToXContent(requestBody, XContentType.JSON).utf8ToString()); + Map responseMap = responseAsMap(client().performRequest(createApiKeyrequest)); + String encodedApiKey = responseMap.get("encoded").toString(); + + final Request createRoleRequest = new Request("POST", "_security/role/test-role"); + createRoleRequest.setOptions(RequestOptions.DEFAULT.toBuilder().addHeader("Authorization", "ApiKey " + encodedApiKey)); + // Allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["allowed-prefix-test"],"privileges": ["read"]}]}"""); + assertOK(client().performRequest(createRoleRequest)); + } + // Not allowed role by manage roles permission + { + createRoleRequest.setJsonEntity(""" + {"indices": [{"names": ["not-allowed-prefix-test"],"privileges": ["read"]}]}"""); + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(createRoleRequest)); + assertEquals(403, e.getResponse().getStatusLine().getStatusCode()); + assertThat(e.getMessage(), containsString("this action is granted by the cluster privileges [manage_security,all]")); + } + } + public void testUpdateApiKey() throws IOException { final var apiKeyName = "my-api-key-name"; final Map apiKeyMetadata = Map.of("not", "returned"); @@ -2393,6 +2439,27 @@ private void createRole(String name, Collection localClusterPrivileges, getSecurityClient().putRole(role); } + private RoleDescriptor roleWithManageRoles(String name, String[] clusterPrivileges, String indexPattern) { + return new RoleDescriptor( + name, + 
clusterPrivileges, + null, + null, + new ConfigurableClusterPrivilege[] { + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { indexPattern }, + new String[] { "read" } + ) + ) + ) }, + null, + null, + null + ); + } + protected void createRoleWithDescription(String name, Collection clusterPrivileges, String description) throws IOException { final RoleDescriptor role = new RoleDescriptor( name, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index d88577f905e96..90566e25b4ea5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -100,6 +100,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilegeResolver; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.RoleReference; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -137,6 +138,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.elasticsearch.TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.TransportVersions.ROLE_REMOTE_CLUSTER_PRIVS; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; @@ -363,29 +365,10 @@ public void createApiKey( listener.onFailure(new 
IllegalArgumentException("authentication must be provided")); } else { final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) - && hasRemoteIndices(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Creating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote cluster privileges for API keys" - ) - ); + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && request.getType() == ApiKey.Type.CROSS_CLUSTER) { listener.onFailure( @@ -407,15 +390,63 @@ && hasRemoteIndices(request.getRoleDescriptors())) { return; } - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, transportVersion, request.getId() ); - createApiKeyAndIndexIt(authentication, request, filteredUserRoleDescriptors, listener); + createApiKeyAndIndexIt(authentication, request, filteredRoleDescriptors, listener); + } + } + + private Set filterRoleDescriptorsForMixedCluster( + final Set 
userRoleDescriptors, + final TransportVersion transportVersion, + final String... apiKeyIds + ) { + final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); + return maybeRemoveRemotePrivileges(userRolesWithoutDescription, transportVersion, apiKeyIds); + } + + private boolean validateRoleDescriptorsForMixedCluster( + final ActionListener listener, + final List roleDescriptors, + final TransportVersion transportVersion + ) { + if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(roleDescriptors)) { + // API keys with roles which define remote indices privileges is not allowed in a mixed cluster. + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() + + "] or higher to support remote indices privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(roleDescriptors)) { + // API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. 
+ listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ROLE_REMOTE_CLUSTER_PRIVS + + "] or higher to support remote cluster privileges for API keys" + ) + ); + return false; + } + if (transportVersion.before(ADD_MANAGE_ROLES_PRIVILEGE) && hasGlobalManageRolesPrivilege(roleDescriptors)) { + listener.onFailure( + new IllegalArgumentException( + "all nodes must have version [" + + ADD_MANAGE_ROLES_PRIVILEGE + + "] or higher to support the manage roles privilege for API keys" + ) + ); + return false; } + return true; } /** @@ -458,6 +489,13 @@ private static boolean hasRemoteCluster(Collection roleDescripto return roleDescriptors != null && roleDescriptors.stream().anyMatch(RoleDescriptor::hasRemoteClusterPermissions); } + private static boolean hasGlobalManageRolesPrivilege(Collection roleDescriptors) { + return roleDescriptors != null + && roleDescriptors.stream() + .flatMap(roleDescriptor -> Arrays.stream(roleDescriptor.getConditionalClusterPrivileges())) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege); + } + private static IllegalArgumentException validateWorkflowsRestrictionConstraints( TransportVersion transportVersion, List requestRoleDescriptors, @@ -594,28 +632,11 @@ public void updateApiKeys( } final TransportVersion transportVersion = getMinTransportVersion(); - if (transportVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) && hasRemoteIndices(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote indices privileges is not allowed in a mixed cluster. 
- listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion() - + "] or higher to support remote indices privileges for API keys" - ) - ); - return; - } - if (transportVersion.before(ROLE_REMOTE_CLUSTER_PRIVS) && hasRemoteCluster(request.getRoleDescriptors())) { - // Updating API keys with roles which define remote cluster privileges is not allowed in a mixed cluster. - listener.onFailure( - new IllegalArgumentException( - "all nodes must have version [" - + ROLE_REMOTE_CLUSTER_PRIVS - + "] or higher to support remote indices privileges for API keys" - ) - ); + + if (validateRoleDescriptorsForMixedCluster(listener, request.getRoleDescriptors(), transportVersion) == false) { return; } + final Exception workflowsValidationException = validateWorkflowsRestrictionConstraints( transportVersion, request.getRoleDescriptors(), @@ -627,22 +648,22 @@ public void updateApiKeys( } final String[] apiKeyIds = request.getIds().toArray(String[]::new); - final Set userRolesWithoutDescription = removeUserRoleDescriptorDescriptions(userRoleDescriptors); - final Set filteredUserRoleDescriptors = maybeRemoveRemotePrivileges( - userRolesWithoutDescription, - transportVersion, - apiKeyIds - ); if (logger.isDebugEnabled()) { logger.debug("Updating [{}] API keys", buildDelimitedStringWithLimit(10, apiKeyIds)); } + Set filteredRoleDescriptors = filterRoleDescriptorsForMixedCluster( + userRoleDescriptors, + transportVersion, + apiKeyIds + ); + findVersionedApiKeyDocsForSubject( authentication, apiKeyIds, ActionListener.wrap( - versionedDocs -> updateApiKeys(authentication, request, filteredUserRoleDescriptors, versionedDocs, listener), + versionedDocs -> updateApiKeys(authentication, request, filteredRoleDescriptors, versionedDocs, listener), ex -> listener.onFailure(traceLog("bulk update", ex)) ) ); diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index a2d2b21b489ea..9ddda193dba39 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -60,6 +60,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissions; +import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult; import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.core.security.support.NativeRealmValidationUtil; @@ -476,7 +477,15 @@ private Exception validateRoleDescriptor(RoleDescriptor role) { + TransportVersions.SECURITY_ROLE_DESCRIPTION.toReleaseVersion() + "] or higher to support specifying role description" ); - } + } else if (Arrays.stream(role.getConditionalClusterPrivileges()) + .anyMatch(privilege -> privilege instanceof ConfigurableClusterPrivileges.ManageRolesPrivilege) + && clusterService.state().getMinTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE)) { + return new IllegalStateException( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ); + } try { DLSRoleQueryValidator.validateQueryField(role.getIndicesPrivileges(), xContentRegistry); } catch (ElasticsearchException | IllegalArgumentException e) { diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 4c5ce703f48ad..9541dd9dc470d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_PROFILE_ORIGIN; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_VERSION_STRING; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_MANAGE_ROLES_PRIVILEGE; /** * Responsible for handling system indices for the Security plugin @@ -409,6 +410,40 @@ private XContentBuilder getMainIndexMappings(SecurityMainIndexMappingVersion map builder.endObject(); } builder.endObject(); + if (mappingVersion.onOrAfter(ADD_MANAGE_ROLES_PRIVILEGE)) { + builder.startObject("role"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("manage"); + { + builder.field("type", "object"); + builder.startObject("properties"); + { + builder.startObject("indices"); + { + builder.startObject("properties"); + { + builder.startObject("names"); + builder.field("type", "keyword"); + builder.endObject(); + builder.startObject("privileges"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } } builder.endObject(); } @@ -1050,6 +1085,11 @@ public enum SecurityMainIndexMappingVersion implements VersionId(Arrays.asList("", "\""))), - new 
ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")) }, + new ConfigurableClusterPrivileges.ManageApplicationPrivileges(Set.of("\"")), + new ConfigurableClusterPrivileges.ManageRolesPrivilege( + List.of( + new ConfigurableClusterPrivileges.ManageRolesPrivilege.ManageRolesIndexPermissionGroup( + new String[] { "test*" }, + new String[] { "read", "write" } + ) + ) + ) }, new String[] { "\"[a]/" }, Map.of(), Map.of() diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt index 7b5e24c97d65a..f913c8608960b 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/audit/logfile/audited_roles.txt @@ -7,6 +7,6 @@ role_descriptor2 role_descriptor3 {"cluster":[],"indices":[],"applications":[{"application":"maps","privileges":["{","}","\n","\\","\""],"resources":["raster:*"]},{"application":"maps","privileges":["*:*"],"resources":["noooooo!!\n\n\f\\\\r","{"]}],"run_as":["jack","nich*","//\""],"metadata":{"some meta":42}} role_descriptor4 -{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{}},"indices":[{"names":["/. ? + * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} +{"cluster":["manage_ml","grant_api_key","manage_rollup"],"global":{"application":{"manage":{"applications":["a+b+|b+a+"]}},"profile":{},"role":{}},"indices":[{"names":["/. ? 
+ * | { } [ ] ( ) \" \\/","*"],"privileges":["read","read_cross_cluster"],"field_security":{"grant":["almost","all*"],"except":["denied*"]}}],"applications":[],"run_as":["//+a+\"[a]/"],"metadata":{"?list":["e1","e2","*"],"some other meta":{"r":"t"}}} role_descriptor5 -{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} +{"cluster":["all"],"global":{"application":{"manage":{"applications":["\""]}},"profile":{"write":{"applications":["","\""]}},"role":{"manage":{"indices":[{"names":["test*"],"privileges":["read","write"]}]}}},"indices":[],"applications":[],"run_as":["\"[a]/"]} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java index 4f4ff1d5743ee..650779cfbc85d 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RolesBackwardsCompatibilityIT.java @@ -29,6 +29,7 @@ import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomApplicationPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomIndicesPrivileges; +import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomManageRolesPrivileges; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTestHelper.randomRoleDescriptorMetadata; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; @@ -40,7 +41,7 @@ public class RolesBackwardsCompatibilityIT extends AbstractUpgradeTestCase { private RestClient oldVersionClient = null; private RestClient newVersionClient = null; - public void testCreatingAndUpdatingRoles() throws Exception { + public 
void testRolesWithDescription() throws Exception { assumeTrue( "The role description is supported after transport version: " + TransportVersions.SECURITY_ROLE_DESCRIPTION, minimumTransportVersion().before(TransportVersions.SECURITY_ROLE_DESCRIPTION) @@ -48,14 +49,14 @@ public void testCreatingAndUpdatingRoles() throws Exception { switch (CLUSTER_TYPE) { case OLD -> { // Creating role in "old" cluster should succeed when description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), "my-old-role", initialRole); - updateRole("my-old-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); // and fail if we include description var createException = expectThrows( Exception.class, - () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorSerialized(true)) + () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( createException.getMessage(), @@ -65,7 +66,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { RestClient client = client(); var updateException = expectThrows( Exception.class, - () -> updateRole(client, "my-old-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( updateException.getMessage(), @@ -74,17 +75,20 @@ public void testCreatingAndUpdatingRoles() throws Exception { } case MIXED -> { try { - this.createClientsByVersion(); + this.createClientsByVersion(TransportVersions.SECURITY_ROLE_DESCRIPTION); // succeed when role description is not provided - final String initialRole = randomRoleDescriptorSerialized(false); + final String initialRole = randomRoleDescriptorSerialized(); createRole(client(), 
"my-valid-mixed-role", initialRole); - updateRole("my-valid-mixed-role", randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(false))); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); // against old node, fail when description is provided either in update or create request { Exception e = expectThrows( Exception.class, - () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -94,7 +98,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -106,7 +110,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -120,7 +124,7 @@ public void testCreatingAndUpdatingRoles() throws Exception { { Exception e = expectThrows( Exception.class, - () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorSerialized(true)) + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithDescriptionSerialized()) ); assertThat( e.getMessage(), @@ -138,11 +142,129 @@ public void testCreatingAndUpdatingRoles() throws Exception { case UPGRADED -> { // on upgraded cluster which supports new description field // create/update requests should succeed either way 
(with or without description) - final String initialRole = randomRoleDescriptorSerialized(randomBoolean()); + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()); createRole(client(), "my-valid-upgraded-role", initialRole); updateRole( "my-valid-upgraded-role", - randomValueOtherThan(initialRole, () -> randomRoleDescriptorSerialized(randomBoolean())) + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithDescriptionSerialized()) + ) + ); + } + } + } + + public void testRolesWithManageRoles() throws Exception { + assumeTrue( + "The manage roles privilege is supported after transport version: " + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE, + minimumTransportVersion().before(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE) + ); + switch (CLUSTER_TYPE) { + case OLD -> { + // Creating role in "old" cluster should succeed when manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-old-role", initialRole); + updateRole("my-old-role", randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized)); + + // and fail if we include manage roles + var createException = expectThrows( + Exception.class, + () -> createRole(client(), "my-invalid-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + createException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + + RestClient client = client(); + var updateException = expectThrows( + Exception.class, + () -> updateRole(client, "my-old-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + updateException.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + case MIXED -> { + try { + 
this.createClientsByVersion(TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE); + // succeed when role manage roles is not provided + final String initialRole = randomRoleDescriptorSerialized(); + createRole(client(), "my-valid-mixed-role", initialRole); + updateRole( + "my-valid-mixed-role", + randomValueOtherThan(initialRole, RolesBackwardsCompatibilityIT::randomRoleDescriptorSerialized) + ); + + // against old node, fail when manage roles is provided either in update or create request + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(oldVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> createRole(oldVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + allOf(containsString("failed to parse privilege"), containsString("but found [role] instead")) + ); + } + + // and against new node in a mixed cluster we should fail + { + Exception e = expectThrows( + Exception.class, + () -> createRole(newVersionClient, "my-invalid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + { + Exception e = expectThrows( + Exception.class, + () -> updateRole(newVersionClient, "my-valid-mixed-role", randomRoleDescriptorWithManageRolesSerialized()) + ); + assertThat( + e.getMessage(), + containsString( + "all nodes must have version [" + + TransportVersions.ADD_MANAGE_ROLES_PRIVILEGE.toReleaseVersion() + + "] or higher to support the manage roles privilege" + ) + ); + } + } finally { + this.closeClientsByVersion(); + } + } + 
case UPGRADED -> { + // on upgraded cluster which supports new description field + // create/update requests should succeed either way (with or without description) + final String initialRole = randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithManageRolesSerialized()); + createRole(client(), "my-valid-upgraded-role", initialRole); + updateRole( + "my-valid-upgraded-role", + randomValueOtherThan( + initialRole, + () -> randomFrom(randomRoleDescriptorSerialized(), randomRoleDescriptorWithManageRolesSerialized()) + ) ); } } @@ -166,10 +288,22 @@ private void updateRole(RestClient client, String roleName, String payload) thro assertThat(created, equalTo(false)); } - private static String randomRoleDescriptorSerialized(boolean includeDescription) { + private static String randomRoleDescriptorSerialized() { + return randomRoleDescriptorSerialized(false, false); + } + + private static String randomRoleDescriptorWithDescriptionSerialized() { + return randomRoleDescriptorSerialized(true, false); + } + + private static String randomRoleDescriptorWithManageRolesSerialized() { + return randomRoleDescriptorSerialized(false, true); + } + + private static String randomRoleDescriptorSerialized(boolean includeDescription, boolean includeManageRoles) { try { return XContentTestUtils.convertToXContent( - XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription)), + XContentTestUtils.convertToMap(randomRoleDescriptor(includeDescription, includeManageRoles)), XContentType.JSON ).utf8ToString(); } catch (IOException e) { @@ -177,26 +311,26 @@ private static String randomRoleDescriptorSerialized(boolean includeDescription) } } - private boolean nodeSupportRoleDescription(Map nodeDetails) { + private boolean nodeSupportTransportVersion(Map nodeDetails, TransportVersion transportVersion) { String nodeVersionString = (String) nodeDetails.get("version"); - TransportVersion transportVersion = getTransportVersionWithFallback( + TransportVersion 
nodeTransportVersion = getTransportVersionWithFallback( nodeVersionString, nodeDetails.get("transport_version"), () -> TransportVersions.ZERO ); - if (transportVersion.equals(TransportVersions.ZERO)) { + if (nodeTransportVersion.equals(TransportVersions.ZERO)) { // In cases where we were not able to find a TransportVersion, a pre-8.8.0 node answered about a newer (upgraded) node. // In that case, the node will be current (upgraded), and remote indices are supported for sure. var nodeIsCurrent = nodeVersionString.equals(Build.current().version()); assertTrue(nodeIsCurrent); return true; } - return transportVersion.onOrAfter(TransportVersions.SECURITY_ROLE_DESCRIPTION); + return nodeTransportVersion.onOrAfter(transportVersion); } - private void createClientsByVersion() throws IOException { - var clientsByCapability = getRestClientByCapability(); + private void createClientsByVersion(TransportVersion transportVersion) throws IOException { + var clientsByCapability = getRestClientByCapability(transportVersion); if (clientsByCapability.size() == 2) { for (Map.Entry client : clientsByCapability.entrySet()) { if (client.getKey() == false) { @@ -224,7 +358,7 @@ private void closeClientsByVersion() throws IOException { } @SuppressWarnings("unchecked") - private Map getRestClientByCapability() throws IOException { + private Map getRestClientByCapability(TransportVersion transportVersion) throws IOException { Response response = client().performRequest(new Request("GET", "_nodes")); assertOK(response); ObjectPath objectPath = ObjectPath.createFromResponse(response); @@ -232,7 +366,7 @@ private Map getRestClientByCapability() throws IOException Map> hostsByCapability = new HashMap<>(); for (Map.Entry entry : nodesAsMap.entrySet()) { Map nodeDetails = (Map) entry.getValue(); - var capabilitySupported = nodeSupportRoleDescription(nodeDetails); + var capabilitySupported = nodeSupportTransportVersion(nodeDetails, transportVersion); Map httpInfo = (Map) nodeDetails.get("http"); 
hostsByCapability.computeIfAbsent(capabilitySupported, k -> new ArrayList<>()) .add(HttpHost.create((String) httpInfo.get("publish_address"))); @@ -244,7 +378,7 @@ private Map getRestClientByCapability() throws IOException return clientsByCapability; } - private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { + private static RoleDescriptor randomRoleDescriptor(boolean includeDescription, boolean includeManageRoles) { final Set excludedPrivileges = Set.of( "cross_cluster_replication", "cross_cluster_replication_internal", @@ -255,7 +389,7 @@ private static RoleDescriptor randomRoleDescriptor(boolean includeDescription) { randomSubsetOf(Set.of("all", "monitor", "none")).toArray(String[]::new), randomIndicesPrivileges(0, 3, excludedPrivileges), randomApplicationPrivileges(), - null, + includeManageRoles ? randomManageRolesPrivileges() : null, generateRandomStringArray(5, randomIntBetween(2, 8), false, true), randomRoleDescriptorMetadata(false), Map.of(), From f150e2c11df0fe3bef298c55bd867437e50f5f73 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 27 Aug 2024 14:34:02 +0100 Subject: [PATCH 13/46] Add telemetry for repository usage (#112133) Adds to the `GET _cluster/stats` endpoint information about the snapshot repositories in use, including their types, whether they are read-only or read-write, and for Azure repositories the kind of credentials in use. 
--- docs/changelog/112133.yaml | 5 ++ docs/reference/cluster/stats.asciidoc | 31 +++++++++- .../repositories/azure/AzureRepository.java | 6 ++ .../azure/AzureStorageService.java | 12 ++++ .../azure/AzureStorageSettings.java | 12 ++++ .../test/repository_azure/20_repository.yml | 13 ++++ .../test/repository_gcs/20_repository.yml | 13 ++++ .../20_repository_permanent_credentials.yml | 13 ++++ .../30_repository_temporary_credentials.yml | 13 ++++ .../40_repository_ec2_credentials.yml | 13 ++++ .../50_repository_ecs_credentials.yml | 13 ++++ .../60_repository_sts_credentials.yml | 13 ++++ server/src/main/java/module-info.java | 1 + .../org/elasticsearch/TransportVersions.java | 2 + .../stats/ClusterStatsNodeResponse.java | 36 ++++++----- .../cluster/stats/ClusterStatsResponse.java | 12 ++++ .../cluster/stats/RepositoryUsageStats.java | 59 +++++++++++++++++++ .../stats/TransportClusterStatsAction.java | 19 ++++-- .../cluster/health/ClusterHealthStatus.java | 2 +- .../repositories/RepositoriesFeatures.java | 23 ++++++++ .../repositories/RepositoriesService.java | 27 +++++++-- .../repositories/Repository.java | 8 +++ .../blobstore/BlobStoreRepository.java | 25 ++++++++ ...lasticsearch.features.FeatureSpecification | 1 + .../cluster/stats/VersionStatsTests.java | 3 +- .../ClusterStatsMonitoringDocTests.java | 25 ++++---- .../AzureRepositoryAnalysisRestIT.java | 37 ++++++++++++ 27 files changed, 400 insertions(+), 37 deletions(-) create mode 100644 docs/changelog/112133.yaml create mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java create mode 100644 server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java diff --git a/docs/changelog/112133.yaml b/docs/changelog/112133.yaml new file mode 100644 index 0000000000000..11109402b7373 --- /dev/null +++ b/docs/changelog/112133.yaml @@ -0,0 +1,5 @@ +pr: 112133 +summary: Add telemetry for repository usage +area: Snapshot/Restore +type: enhancement +issues: [] 
diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 3b429ef427071..c39bc0dcd2878 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -1282,6 +1282,31 @@ They are included here for expert users, but should otherwise be ignored. ===== +==== + +`repositories`:: +(object) Contains statistics about the <> repositories defined in the cluster, broken down +by repository type. ++ +.Properties of `repositories` +[%collapsible%open] +===== + +`count`::: +(integer) The number of repositories of this type in the cluster. + +`read_only`::: +(integer) The number of repositories of this type in the cluster which are registered read-only. + +`read_write`::: +(integer) The number of repositories of this type in the cluster which are not registered as read-only. + +Each repository type may also include other statistics about the repositories of that type here. + +===== + +==== + [[cluster-stats-api-example]] ==== {api-examples-title} @@ -1579,6 +1604,9 @@ The API returns the following response: }, "snapshots": { ... + }, + "repositories": { + ... } } -------------------------------------------------- @@ -1589,6 +1617,7 @@ The API returns the following response: // TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/] // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/] // TESTRESPONSE[s/"snapshots": \{[^\}]*\}/"snapshots": $body.$_path/] +// TESTRESPONSE[s/"repositories": \{[^\}]*\}/"repositories": $body.$_path/] // TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/] // TESTRESPONSE[s/"runtime_field_types": \[[^\]]*\]/"runtime_field_types": $body.$_path/] // TESTRESPONSE[s/"search": \{[^\}]*\}/"search": $body.$_path/] @@ -1600,7 +1629,7 @@ The API returns the following response: // the plugins that will be in it. And because we figure folks don't need to // see an exhaustive list anyway. // 2. 
Similarly, ignore the contents of `network_types`, `discovery_types`, -// `packaging_types` and `snapshots`. +// `packaging_types`, `snapshots` and `repositories`. // 3. Ignore the contents of the (nodes) count object, as what's shown here // depends on the license. Voting-only nodes are e.g. only shown when this // test runs with a basic license. diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 388474acc75ea..c8c0b15db5ebe 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -26,6 +26,7 @@ import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.Function; import static org.elasticsearch.core.Strings.format; @@ -175,4 +176,9 @@ protected ByteSizeValue chunkSize() { public boolean isReadOnly() { return readonly; } + + @Override + protected Set getExtraUsageFeatures() { + return storageService.getExtraUsageFeatures(Repository.CLIENT_NAME.get(getMetadata().settings())); + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 0d6cd7bf3d246..09088004759a8 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -24,6 +24,7 @@ import java.net.Proxy; import java.net.URL; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import static java.util.Collections.emptyMap; @@ -165,4 +166,15 @@ public void refreshSettings(Map clientsSettings) { 
this.storageSettings = Map.copyOf(clientsSettings); // clients are built lazily by {@link client(String, LocationMode)} } + + /** + * For Azure repositories, we report the different kinds of credentials in use in the telemetry. + */ + public Set getExtraUsageFeatures(String clientName) { + try { + return getClientSettings(clientName).credentialsUsageFeatures(); + } catch (Exception e) { + return Set.of(); + } + } } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index b3e8dd8898bea..2333a1fdb9e93 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Set; final class AzureStorageSettings { @@ -130,6 +131,7 @@ final class AzureStorageSettings { private final int maxRetries; private final Proxy proxy; private final boolean hasCredentials; + private final Set credentialsUsageFeatures; private AzureStorageSettings( String account, @@ -150,6 +152,12 @@ private AzureStorageSettings( this.endpointSuffix = endpointSuffix; this.timeout = timeout; this.maxRetries = maxRetries; + this.credentialsUsageFeatures = Strings.hasText(key) ? Set.of("uses_key_credentials") + : Strings.hasText(sasToken) ? Set.of("uses_sas_token") + : SocketAccess.doPrivilegedException(() -> System.getenv("AZURE_FEDERATED_TOKEN_FILE")) == null + ? 
Set.of("uses_default_credentials", "uses_managed_identity") + : Set.of("uses_default_credentials", "uses_workload_identity"); + // Register the proxy if we have any // Validate proxy settings if (proxyType.equals(Proxy.Type.DIRECT) && ((proxyPort != 0) || Strings.hasText(proxyHost))) { @@ -366,4 +374,8 @@ private String deriveURIFromSettings(boolean isPrimary) { throw new IllegalArgumentException(e); } } + + public Set credentialsUsageFeatures() { + return credentialsUsageFeatures; + } } diff --git a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml index 299183f26d9dc..a4a7d0b22a0ed 100644 --- a/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml +++ b/modules/repository-azure/src/yamlRestTest/resources/rest-api-spec/test/repository_azure/20_repository.yml @@ -235,6 +235,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.azure.count: 1 } + - gte: { repositories.azure.read_write: 1 } + --- teardown: diff --git a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml index 68d61be4983c5..e8c34a4b6a20b 100644 --- a/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml +++ b/modules/repository-gcs/src/yamlRestTest/resources/rest-api-spec/test/repository_gcs/20_repository.yml @@ -232,6 +232,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - 
do: + cluster.stats: {} + + - gte: { repositories.gcs.count: 1 } + - gte: { repositories.gcs.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 77870697f93ae..e88a0861ec01c 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -345,6 +345,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 4a62d6183470d..501af980e17e3 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git 
a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index e24ff1ad0e559..129f0ba5d7588 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 9c332cc7d9301..de334b4b3df96 100644 --- a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -256,6 +256,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml index 24c2b2b1741d6..09a8526017960 100644 --- 
a/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml +++ b/modules/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_sts_credentials.yml @@ -257,6 +257,19 @@ setup: snapshot: missing wait_for_completion: true +--- +"Usage stats": + - requires: + cluster_features: + - repositories.supports_usage_stats + reason: requires this feature + + - do: + cluster.stats: {} + + - gte: { repositories.s3.count: 1 } + - gte: { repositories.s3.read_write: 1 } + --- teardown: diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index c223db531e688..d412748ed4e57 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -429,6 +429,7 @@ org.elasticsearch.cluster.metadata.MetadataFeatures, org.elasticsearch.rest.RestFeatures, org.elasticsearch.indices.IndicesFeatures, + org.elasticsearch.repositories.RepositoriesFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.ingest.IngestGeoIpFeatures, diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 582c618216999..41fa34bb5a4a3 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -199,6 +199,8 @@ static TransportVersion def(int id) { public static final TransportVersion RANK_DOCS_RETRIEVER = def(8_729_00_0); public static final TransportVersion ESQL_ES_FIELD_CACHED_SERIALIZATION = def(8_730_00_0); public static final TransportVersion ADD_MANAGE_ROLES_PRIVILEGE = def(8_731_00_0); + public static final TransportVersion REPOSITORIES_TELEMETRY = def(8_732_00_0); + /* * STOP! READ THIS FIRST! 
No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index d74889b623589..b48295dc8b3eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -20,29 +20,33 @@ import org.elasticsearch.core.Nullable; import java.io.IOException; +import java.util.Objects; public class ClusterStatsNodeResponse extends BaseNodeResponse { private final NodeInfo nodeInfo; private final NodeStats nodeStats; private final ShardStats[] shardsStats; - private ClusterHealthStatus clusterStatus; + private final ClusterHealthStatus clusterStatus; private final SearchUsageStats searchUsageStats; + private final RepositoryUsageStats repositoryUsageStats; public ClusterStatsNodeResponse(StreamInput in) throws IOException { super(in); - clusterStatus = null; - if (in.readBoolean()) { - clusterStatus = ClusterHealthStatus.readFrom(in); - } + this.clusterStatus = in.readOptionalWriteable(ClusterHealthStatus::readFrom); this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); - shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); + this.shardsStats = in.readArray(ShardStats::new, ShardStats[]::new); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); } + if (in.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats = RepositoryUsageStats.readFrom(in); + } else { + repositoryUsageStats = RepositoryUsageStats.EMPTY; + } } public ClusterStatsNodeResponse( @@ -51,14 +55,16 @@ public ClusterStatsNodeResponse( NodeInfo 
nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats, - SearchUsageStats searchUsageStats + SearchUsageStats searchUsageStats, + RepositoryUsageStats repositoryUsageStats ) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; this.shardsStats = shardsStats; this.clusterStatus = clusterStatus; - this.searchUsageStats = searchUsageStats; + this.searchUsageStats = Objects.requireNonNull(searchUsageStats); + this.repositoryUsageStats = Objects.requireNonNull(repositoryUsageStats); } public NodeInfo nodeInfo() { @@ -85,20 +91,22 @@ public SearchUsageStats searchUsageStats() { return searchUsageStats; } + public RepositoryUsageStats repositoryUsageStats() { + return repositoryUsageStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (clusterStatus == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeByte(clusterStatus.value()); - } + out.writeOptionalWriteable(clusterStatus); nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } + if (out.getTransportVersion().onOrAfter(TransportVersions.REPOSITORIES_TELEMETRY)) { + repositoryUsageStats.writeTo(out); + } // else just drop these stats, ok for bwc } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 36e7b247befac..b6dd40e8c8b79 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -30,6 +30,7 @@ public class ClusterStatsResponse extends BaseNodesResponse r.isEmpty() == false) + // stats should be the same on every node so just pick one of them + .findAny() + .orElse(RepositoryUsageStats.EMPTY); 
} public String getClusterUUID() { @@ -113,6 +122,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("snapshots"); clusterSnapshotStats.toXContent(builder, params); + builder.field("repositories"); + repositoryUsageStats.toXContent(builder, params); + return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java new file mode 100644 index 0000000000000..771aa0fbef842 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RepositoryUsageStats.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Map; + +/** + * Stats on repository feature usage exposed in cluster stats for telemetry. + * + * @param statsByType a count of the repositories using various named features, keyed by repository type and then by feature name. 
+ */ +public record RepositoryUsageStats(Map> statsByType) implements Writeable, ToXContentObject { + + public static final RepositoryUsageStats EMPTY = new RepositoryUsageStats(Map.of()); + + public static RepositoryUsageStats readFrom(StreamInput in) throws IOException { + final var statsByType = in.readMap(i -> i.readMap(StreamInput::readVLong)); + if (statsByType.isEmpty()) { + return EMPTY; + } else { + return new RepositoryUsageStats(statsByType); + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(statsByType, (o, m) -> o.writeMap(m, StreamOutput::writeVLong)); + } + + public boolean isEmpty() { + return statsByType.isEmpty(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (Map.Entry> typeAndStats : statsByType.entrySet()) { + builder.startObject(typeAndStats.getKey()); + for (Map.Entry statAndValue : typeAndStats.getValue().entrySet()) { + builder.field(statAndValue.getKey(), statAndValue.getValue()); + } + builder.endObject(); + } + return builder.endObject(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index bcf49bca421f6..1912de3cfa4d2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -41,6 +41,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.node.NodeService; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -78,6 +79,7 @@ public class 
TransportClusterStatsAction extends TransportNodesAction< private final NodeService nodeService; private final IndicesService indicesService; + private final RepositoriesService repositoriesService; private final SearchUsageHolder searchUsageHolder; private final MetadataStatsCache mappingStatsCache; @@ -90,6 +92,7 @@ public TransportClusterStatsAction( TransportService transportService, NodeService nodeService, IndicesService indicesService, + RepositoriesService repositoriesService, UsageService usageService, ActionFilters actionFilters ) { @@ -103,6 +106,7 @@ public TransportClusterStatsAction( ); this.nodeService = nodeService; this.indicesService = indicesService; + this.repositoriesService = repositoriesService; this.searchUsageHolder = usageService.getSearchUsageHolder(); this.mappingStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), MappingStats::of); this.analysisStatsCache = new MetadataStatsCache<>(threadPool.getThreadContext(), AnalysisStats::of); @@ -237,12 +241,14 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq } } - ClusterHealthStatus clusterStatus = null; - if (clusterService.state().nodes().isLocalNodeElectedMaster()) { - clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); - } + final ClusterState clusterState = clusterService.state(); + final ClusterHealthStatus clusterStatus = clusterState.nodes().isLocalNodeElectedMaster() + ? 
new ClusterStateHealth(clusterState).getStatus() + : null; + + final SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); - SearchUsageStats searchUsageStats = searchUsageHolder.getSearchUsageStats(); + final RepositoryUsageStats repositoryUsageStats = repositoriesService.getUsageStats(); return new ClusterStatsNodeResponse( nodeInfo.getNode(), @@ -250,7 +256,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]), - searchUsageStats + searchUsageStats, + repositoryUsageStats ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java index d025ddab26af6..c53395b5d76c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,7 @@ public enum ClusterHealthStatus implements Writeable { YELLOW((byte) 1), RED((byte) 2); - private byte value; + private final byte value; ClusterHealthStatus(byte value) { this.value = value; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java new file mode 100644 index 0000000000000..141dac0c5c430 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesFeatures.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Set; + +public class RepositoriesFeatures implements FeatureSpecification { + public static final NodeFeature SUPPORTS_REPOSITORIES_USAGE_STATS = new NodeFeature("repositories.supports_usage_stats"); + + @Override + public Set getFeatures() { + return Set.of(SUPPORTS_REPOSITORIES_USAGE_STATS); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index de4ae1051ba62..732a18dffe233 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.node.NodeClient; @@ -944,15 +945,33 @@ public List> getPreRestoreVersionChecks() { return preRestoreChecks; } - @Override - protected void doStart() { + public static String COUNT_USAGE_STATS_NAME = "count"; + public RepositoryUsageStats getUsageStats() { + if (repositories.isEmpty()) { + return RepositoryUsageStats.EMPTY; + } + final var statsByType = new HashMap>(); + for (final var repository : repositories.values()) { + final var repositoryType = repository.getMetadata().type(); + final var typeStats = statsByType.computeIfAbsent(repositoryType, ignored -> new HashMap<>()); + typeStats.compute(COUNT_USAGE_STATS_NAME, (k, count) -> (count == 
null ? 0L : count) + 1); + final var repositoryUsageTags = repository.getUsageFeatures(); + assert repositoryUsageTags.contains(COUNT_USAGE_STATS_NAME) == false : repositoryUsageTags; + for (final var repositoryUsageTag : repositoryUsageTags) { + typeStats.compute(repositoryUsageTag, (k, count) -> (count == null ? 0L : count) + 1); + } + } + return new RepositoryUsageStats( + statsByType.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> Map.copyOf(e.getValue()))) + ); } @Override - protected void doStop() { + protected void doStart() {} - } + @Override + protected void doStop() {} @Override protected void doClose() throws IOException { diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index fd52c21cad3f8..09f4782b6e5fa 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -312,6 +312,14 @@ void cloneShardSnapshot( */ void awaitIdle(); + /** + * @return a set of the names of the features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection. 
+ */ + default Set getUsageFeatures() { + return Set.of(); + } + static boolean assertSnapshotMetaThread() { return ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SNAPSHOT_META); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e8af752bec179..cc56e940530e8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -3943,4 +3943,29 @@ public String getAnalysisFailureExtraDetail() { ReferenceDocs.SNAPSHOT_REPOSITORY_ANALYSIS ); } + + public static final String READ_ONLY_USAGE_STATS_NAME = "read_only"; + public static final String READ_WRITE_USAGE_STATS_NAME = "read_write"; + + @Override + public final Set getUsageFeatures() { + final var extraUsageFeatures = getExtraUsageFeatures(); + assert extraUsageFeatures.contains(READ_ONLY_USAGE_STATS_NAME) == false : extraUsageFeatures; + assert extraUsageFeatures.contains(READ_WRITE_USAGE_STATS_NAME) == false : extraUsageFeatures; + return Set.copyOf( + Stream.concat(Stream.of(isReadOnly() ? READ_ONLY_USAGE_STATS_NAME : READ_WRITE_USAGE_STATS_NAME), extraUsageFeatures.stream()) + .toList() + ); + } + + /** + * All blob-store repositories include the counts of read-only and read-write repositories in their telemetry. This method returns other + * features of the repositories in use. + * + * @return a set of the names of the extra features that this repository instance uses, for reporting in the cluster stats for telemetry + * collection. 
+ */ + protected Set getExtraUsageFeatures() { + return Set.of(); + } } diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification index baf7e53345944..90a1c29972ff3 100644 --- a/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -13,6 +13,7 @@ org.elasticsearch.cluster.service.TransportFeatures org.elasticsearch.cluster.metadata.MetadataFeatures org.elasticsearch.rest.RestFeatures org.elasticsearch.indices.IndicesFeatures +org.elasticsearch.repositories.RepositoriesFeatures org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures org.elasticsearch.index.mapper.MapperFeatures org.elasticsearch.ingest.IngestGeoIpFeatures diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java index 49528c204b042..20eae9833e4b0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/VersionStatsTests.java @@ -127,7 +127,8 @@ public void testCreation() { null, null, new ShardStats[] { shardStats }, - null + new SearchUsageStats(), + RepositoryUsageStats.EMPTY ); stats = VersionStats.of(metadata, Collections.singletonList(nodeResponse)); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index c89638045a5a8..4a695f7c51e4c 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsNodeResponse; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse; import org.elasticsearch.action.admin.cluster.stats.MappingStats; +import org.elasticsearch.action.admin.cluster.stats.RepositoryUsageStats; import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.action.admin.cluster.stats.VersionStats; import org.elasticsearch.action.admin.indices.stats.CommonStats; @@ -420,6 +421,7 @@ public void testToXContent() throws IOException { when(mockNodeResponse.nodeStats()).thenReturn(mockNodeStats); when(mockNodeResponse.shardsStats()).thenReturn(new ShardStats[] { mockShardStats }); when(mockNodeResponse.searchUsageStats()).thenReturn(new SearchUsageStats()); + when(mockNodeResponse.repositoryUsageStats()).thenReturn(RepositoryUsageStats.EMPTY); final Metadata metadata = testClusterState.metadata(); final ClusterStatsResponse clusterStatsResponse = new ClusterStatsResponse( @@ -533,7 +535,9 @@ public void testToXContent() throws IOException { "fielddata": { "memory_size_in_bytes": 1, "evictions": 0, - "global_ordinals":{"build_time_in_millis":1} + "global_ordinals": { + "build_time_in_millis": 1 + } }, "query_cache": { "memory_size_in_bytes": 0, @@ -563,9 +567,9 @@ public void testToXContent() throws IOException { "file_sizes": {} }, "mappings": { - "total_field_count" : 0, - "total_deduplicated_field_count" : 0, - "total_deduplicated_mapping_size_in_bytes" : 0, + "total_field_count": 0, + "total_deduplicated_field_count": 0, + "total_deduplicated_mapping_size_in_bytes": 0, "field_types": [], "runtime_field_types": [] }, @@ -581,11 +585,11 @@ public void 
testToXContent() throws IOException { "synonyms": {} }, "versions": [], - "search" : { - "total" : 0, - "queries" : {}, - "rescorers" : {}, - "sections" : {} + "search": { + "total": 0, + "queries": {}, + "rescorers": {}, + "sections": {} }, "dense_vector": { "value_count": 0 @@ -749,7 +753,8 @@ public void testToXContent() throws IOException { "cleanups": 0 }, "repositories": {} - } + }, + "repositories": {} }, "cluster_state": { "nodes_hash": 1314980060, diff --git a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java index ecc8401e1d79a..a9b8fe51c01cc 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/qa/azure/src/javaRestTest/java/org/elasticsearch/repositories/blobstore/testkit/analyze/AzureRepositoryAnalysisRestIT.java @@ -8,6 +8,8 @@ import fixture.azure.AzureHttpFixture; +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Booleans; @@ -15,15 +17,20 @@ import org.elasticsearch.test.TestTrustStore; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ObjectPath; +import org.hamcrest.Matcher; import org.junit.ClassRule; import org.junit.rules.RuleChain; import org.junit.rules.TestRule; +import java.io.IOException; import java.util.Map; import java.util.function.Predicate; import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; 
import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class AzureRepositoryAnalysisRestIT extends AbstractRepositoryAnalysisRestTestCase { private static final boolean USE_FIXTURE = Booleans.parseBoolean(System.getProperty("test.azure.fixture", "true")); @@ -119,4 +126,34 @@ protected Settings repositorySettings() { return Settings.builder().put("client", "repository_test_kit").put("container", container).put("base_path", basePath).build(); } + + public void testClusterStats() throws IOException { + registerRepository(randomIdentifier(), repositoryType(), true, repositorySettings()); + + final var request = new Request(HttpGet.METHOD_NAME, "/_cluster/stats"); + final var response = client().performRequest(request); + assertOK(response); + + final var objectPath = ObjectPath.createFromResponse(response); + assertThat(objectPath.evaluate("repositories.azure.count"), isSetIff(true)); + assertThat(objectPath.evaluate("repositories.azure.read_write"), isSetIff(true)); + + assertThat(objectPath.evaluate("repositories.azure.uses_key_credentials"), isSetIff(Strings.hasText(AZURE_TEST_KEY))); + assertThat(objectPath.evaluate("repositories.azure.uses_sas_token"), isSetIff(Strings.hasText(AZURE_TEST_SASTOKEN))); + assertThat( + objectPath.evaluate("repositories.azure.uses_default_credentials"), + isSetIff((Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY)) == false) + ); + assertThat( + objectPath.evaluate("repositories.azure.uses_managed_identity"), + isSetIff( + (Strings.hasText(AZURE_TEST_SASTOKEN) || Strings.hasText(AZURE_TEST_KEY) || Strings.hasText(AZURE_TEST_CLIENT_ID)) == false + ) + ); + assertThat(objectPath.evaluate("repositories.azure.uses_workload_identity"), isSetIff(Strings.hasText(AZURE_TEST_CLIENT_ID))); + } + + private static Matcher isSetIff(boolean predicate) { + return predicate ? 
equalTo(1) : nullValue(Integer.class); + } } From b7e1d5593b42f03aecc387160af6f452c4d25351 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Tue, 27 Aug 2024 15:45:53 +0200 Subject: [PATCH 14/46] Fix connection timeout for OpenIdConnectAuthenticator get Userinfo (#112230) * Fix connection timeout for OpenIdConnectAuthenticator get Userinfo * Update docs/changelog/112230.yaml --- docs/changelog/112230.yaml | 5 +++++ .../security/authc/oidc/OpenIdConnectAuthenticator.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112230.yaml diff --git a/docs/changelog/112230.yaml b/docs/changelog/112230.yaml new file mode 100644 index 0000000000000..ef12dc3f78267 --- /dev/null +++ b/docs/changelog/112230.yaml @@ -0,0 +1,5 @@ +pr: 112230 +summary: Fix connection timeout for `OpenIdConnectAuthenticator` get Userinfo +area: Security +type: bug +issues: [] diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java index 0f34850b861b7..c2e0caf7234cb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/oidc/OpenIdConnectAuthenticator.java @@ -718,7 +718,7 @@ private CloseableHttpAsyncClient createHttpClient() { connectionManager.setMaxTotal(realmConfig.getSetting(HTTP_MAX_CONNECTIONS)); final RequestConfig requestConfig = RequestConfig.custom() .setConnectTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECT_TIMEOUT).getMillis())) - .setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getSeconds())) + 
.setConnectionRequestTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_CONNECTION_READ_TIMEOUT).getMillis())) .setSocketTimeout(Math.toIntExact(realmConfig.getSetting(HTTP_SOCKET_TIMEOUT).getMillis())) .build(); From b14bada16f3c66598e18393d8d30271a81096ec3 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 27 Aug 2024 10:44:29 -0400 Subject: [PATCH 15/46] [ML] Update inference interfaces for streaming (#112234) Using InferenceServiceResults and InferenceAction to stream ChunkedToXContent through to the Rest handler. --- .../inference/InferenceServiceResults.java | 24 ++++++++++++++++--- .../inference/action/InferenceAction.java | 20 ++++++++++++++++ 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java index f8330404c1538..0000e0ddc9af9 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceServiceResults.java @@ -13,17 +13,18 @@ import java.util.List; import java.util.Map; +import java.util.concurrent.Flow; public interface InferenceServiceResults extends NamedWriteable, ChunkedToXContent { /** - * Transform the result to match the format required for the TransportCoordinatedInferenceAction. + *

Transform the result to match the format required for the TransportCoordinatedInferenceAction. * For the inference plugin TextEmbeddingResults, the {@link #transformToLegacyFormat()} transforms the * results into an intermediate format only used by the plugin's return value. It doesn't align with what the * TransportCoordinatedInferenceAction expects. TransportCoordinatedInferenceAction expects an ml plugin - * TextEmbeddingResults. + * TextEmbeddingResults.

* - * For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat. + *

For other results like SparseEmbeddingResults, this method can be a pass through to the transformToLegacyFormat.

*/ List transformToCoordinationFormat(); @@ -37,4 +38,21 @@ public interface InferenceServiceResults extends NamedWriteable, ChunkedToXConte * Convert the result to a map to aid with test assertions */ Map asMap(); + + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Defaults to {@code false}. + */ + default boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the InferenceAction.Results will subscribe to this publisher. + * Implementations should follow the {@link java.util.concurrent.Flow.Publisher} spec to stream the chunks. + */ + default Flow.Publisher publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java index 7ecb5aef4ce8d..c38f508db1b6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/InferenceAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.TimeValue; @@ -40,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Flow; import static org.elasticsearch.core.Strings.format; @@ -391,6 +393,24 @@ public 
InferenceServiceResults getResults() { return results; } + /** + * Returns {@code true} if these results are streamed as chunks, or {@code false} if these results contain the entire payload. + * Currently set to false while it is being implemented. + */ + public boolean isStreaming() { + return false; + } + + /** + * When {@link #isStreaming()} is {@code true}, the RestHandler will subscribe to this publisher. + * When the RestResponse is finished with the current chunk, it will request the next chunk using the subscription. + * If the RestResponse is closed, it will cancel the subscription. + */ + public Flow.Publisher publisher() { + assert isStreaming() == false : "This must be implemented when isStreaming() == true"; + throw new UnsupportedOperationException("This must be implemented when isStreaming() == true"); + } + @Override public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) { From b43470feeb82d602f549b6dfee9243d9afa6ce25 Mon Sep 17 00:00:00 2001 From: Oleksandr Kolomiiets Date: Tue, 27 Aug 2024 07:55:50 -0700 Subject: [PATCH 16/46] Fix nested field generation in StandardVersusLogsIndexModeRandomDataChallengeRestIT (#112223) --- .../logsdb/datageneration/fields/Context.java | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java index 647d5bff152d1..62130967508f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java +++ b/test/framework/src/main/java/org/elasticsearch/logsdb/datageneration/fields/Context.java @@ -13,6 +13,7 @@ import org.elasticsearch.logsdb.datageneration.datasource.DataSourceResponse; import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; class Context { private final 
DataGeneratorSpecification specification; @@ -21,13 +22,14 @@ class Context { private final DataSourceResponse.FieldTypeGenerator fieldTypeGenerator; private final DataSourceResponse.ObjectArrayGenerator objectArrayGenerator; private final int objectDepth; - private final int nestedFieldsCount; + // We don't need atomicity, but we need to pass counter by reference to accumulate total value from sub-objects. + private final AtomicInteger nestedFieldsCount; Context(DataGeneratorSpecification specification) { - this(specification, 0, 0); + this(specification, 0, new AtomicInteger(0)); } - private Context(DataGeneratorSpecification specification, int objectDepth, int nestedFieldsCount) { + private Context(DataGeneratorSpecification specification, int objectDepth, AtomicInteger nestedFieldsCount) { this.specification = specification; this.childFieldGenerator = specification.dataSource().get(new DataSourceRequest.ChildFieldGenerator(specification)); this.fieldTypeGenerator = specification.dataSource().get(new DataSourceRequest.FieldTypeGenerator()); @@ -53,7 +55,8 @@ public Context subObject() { } public Context nestedObject() { - return new Context(specification, objectDepth + 1, nestedFieldsCount + 1); + nestedFieldsCount.incrementAndGet(); + return new Context(specification, objectDepth + 1, nestedFieldsCount); } public boolean shouldAddObjectField() { @@ -63,7 +66,7 @@ public boolean shouldAddObjectField() { public boolean shouldAddNestedField() { return childFieldGenerator.generateNestedSubObject() && objectDepth < specification.maxObjectDepth() - && nestedFieldsCount < specification.nestedFieldsLimit(); + && nestedFieldsCount.get() < specification.nestedFieldsLimit(); } public Optional generateObjectArray() { From ed515138160da2b2431fd93462d3f3b7178e2e1b Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 27 Aug 2024 10:57:17 -0400 Subject: [PATCH 17/46] ESQL: Remove `LogicalPlan` from old serialization (#112237) This removes `LogicalPlan` subclasses from 
`PlanNamedTypes` because it is no longer used. --- .../xpack/esql/io/stream/PlanNamedTypes.java | 35 +------------ .../esql/io/stream/PlanNamedTypesTests.java | 52 ------------------- 2 files changed, 1 insertion(+), 86 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 180ba8c028e6a..77d982453203c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -23,24 +23,9 @@ import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.index.EsIndex; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; 
import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -132,25 +117,7 @@ public static List namedTypeEntries() { of(PhysicalPlan.class, ProjectExec.class, PlanNamedTypes::writeProjectExec, PlanNamedTypes::readProjectExec), of(PhysicalPlan.class, RowExec.class, PlanNamedTypes::writeRowExec, PlanNamedTypes::readRowExec), of(PhysicalPlan.class, ShowExec.class, PlanNamedTypes::writeShowExec, PlanNamedTypes::readShowExec), - of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec), - // Logical Plan Nodes - a subset of plans that end up being actually serialized - of(LogicalPlan.class, Aggregate.ENTRY), - of(LogicalPlan.class, Dissect.ENTRY), - of(LogicalPlan.class, EsRelation.ENTRY), - of(LogicalPlan.class, Eval.ENTRY), - of(LogicalPlan.class, Enrich.ENTRY), - of(LogicalPlan.class, EsqlProject.ENTRY), - of(LogicalPlan.class, Filter.ENTRY), - of(LogicalPlan.class, Grok.ENTRY), - of(LogicalPlan.class, InlineStats.ENTRY), - of(LogicalPlan.class, Join.ENTRY), - of(LogicalPlan.class, Limit.ENTRY), - of(LogicalPlan.class, LocalRelation.ENTRY), - of(LogicalPlan.class, Lookup.ENTRY), - of(LogicalPlan.class, MvExpand.ENTRY), - of(LogicalPlan.class, OrderBy.ENTRY), - of(LogicalPlan.class, Project.ENTRY), - of(LogicalPlan.class, TopN.ENTRY) + of(PhysicalPlan.class, TopNExec.class, PlanNamedTypes::writeTopNExec, PlanNamedTypes::readTopNExec) ); return declared; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index e5f195b053349..56ab1bd41693e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -38,24 +38,6 @@ import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; -import org.elasticsearch.xpack.esql.plan.logical.Dissect; -import org.elasticsearch.xpack.esql.plan.logical.Enrich; -import org.elasticsearch.xpack.esql.plan.logical.EsRelation; -import org.elasticsearch.xpack.esql.plan.logical.Eval; -import org.elasticsearch.xpack.esql.plan.logical.Filter; -import org.elasticsearch.xpack.esql.plan.logical.Grok; -import org.elasticsearch.xpack.esql.plan.logical.InlineStats; -import org.elasticsearch.xpack.esql.plan.logical.Limit; -import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.esql.plan.logical.Lookup; -import org.elasticsearch.xpack.esql.plan.logical.MvExpand; -import org.elasticsearch.xpack.esql.plan.logical.OrderBy; -import org.elasticsearch.xpack.esql.plan.logical.Project; -import org.elasticsearch.xpack.esql.plan.logical.TopN; -import org.elasticsearch.xpack.esql.plan.logical.join.Join; -import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -130,40 +112,6 @@ public void testPhysicalPlanEntries() { assertMap(actual, matchesList(expected)); } - // List of known serializable logical plan nodes - this should be kept up to date or retrieved - // programmatically. 
- public static final List> LOGICAL_PLAN_NODE_CLS = List.of( - Aggregate.class, - Dissect.class, - Enrich.class, - EsRelation.class, - EsqlProject.class, - Eval.class, - Filter.class, - Grok.class, - InlineStats.class, - Join.class, - Limit.class, - LocalRelation.class, - Lookup.class, - MvExpand.class, - OrderBy.class, - Project.class, - TopN.class - ); - - // Tests that all logical plan nodes have a suitably named serialization entry. - public void testLogicalPlanEntries() { - var expected = LOGICAL_PLAN_NODE_CLS.stream().map(Class::getSimpleName).toList(); - var actual = PlanNamedTypes.namedTypeEntries() - .stream() - .filter(e -> e.categoryClass().isAssignableFrom(LogicalPlan.class)) - .map(PlanNameRegistry.Entry::name) - .sorted() - .toList(); - assertMap(actual, matchesList(expected)); - } - // Tests that all names are unique - there should be a good reason if this is not the case. public void testUniqueNames() { var actual = PlanNamedTypes.namedTypeEntries().stream().map(PlanNameRegistry.Entry::name).distinct().toList(); From bd2d6aa55fdf839ca42ebf04a6493732b6c94b24 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 27 Aug 2024 09:14:49 -0600 Subject: [PATCH 18/46] Fix template alias parsing livelock (#112217) * Fix template alias parsing livelock This commit fixes an issue with templates parsing alias definitions that can cause the ES thread to hang indefinitely. Due to the malformed alias definition, the parsing gets into a loop which never exits. In this commit a null check in both the component template and alias parsing code is added, which prevents the looping. 
--- docs/changelog/112217.yaml | 5 +++++ .../cluster/metadata/AliasMetadata.java | 2 ++ .../cluster/metadata/Template.java | 6 +++++- .../metadata/ComponentTemplateTests.java | 19 +++++++++++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/112217.yaml diff --git a/docs/changelog/112217.yaml b/docs/changelog/112217.yaml new file mode 100644 index 0000000000000..bb367d6128001 --- /dev/null +++ b/docs/changelog/112217.yaml @@ -0,0 +1,5 @@ +pr: 112217 +summary: Fix template alias parsing livelock +area: Indices APIs +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java index a0f4a929dafdb..ff412d629b3b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasMetadata.java @@ -396,6 +396,8 @@ public static AliasMetadata fromXContent(XContentParser parser) throws IOExcepti } else if ("is_hidden".equals(currentFieldName)) { builder.isHidden(parser.booleanValue()); } + } else if (token == null) { + throw new IllegalArgumentException("unexpected null token while parsing alias"); } } return builder.build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index 70440adc4ebbe..b044ef6042428 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -70,7 +70,11 @@ public class Template implements SimpleDiffable