From 507c1b6a1ada3b90afffcca01367ed1d58faadb5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Sat, 27 Apr 2024 20:14:00 -0400 Subject: [PATCH 001/244] ESQL: Add Block#lookup method (#107982) This adds a method to build a new `Block` by looking up the values in an existing `Block`. Like `BlockHash#lookup` this returns a `ReleasableIterator`. This should allow us to load values using the results of `BlockHash#lookup`. --- x-pack/plugin/esql/compute/build.gradle | 27 ++++ .../compute/data/BooleanArrayBlock.java | 7 + .../compute/data/BooleanBigArrayBlock.java | 7 + .../compute/data/BooleanBlock.java | 5 + .../compute/data/BooleanLookup.java | 96 ++++++++++++++ .../compute/data/BooleanVectorBlock.java | 8 ++ .../compute/data/BytesRefArrayBlock.java | 7 + .../compute/data/BytesRefBlock.java | 5 + .../compute/data/BytesRefLookup.java | 99 ++++++++++++++ .../compute/data/BytesRefVectorBlock.java | 8 ++ .../compute/data/DoubleArrayBlock.java | 7 + .../compute/data/DoubleBigArrayBlock.java | 7 + .../compute/data/DoubleBlock.java | 5 + .../compute/data/DoubleLookup.java | 96 ++++++++++++++ .../compute/data/DoubleVectorBlock.java | 8 ++ .../compute/data/IntArrayBlock.java | 7 + .../compute/data/IntBigArrayBlock.java | 7 + .../elasticsearch/compute/data/IntBlock.java | 5 + .../elasticsearch/compute/data/IntLookup.java | 96 ++++++++++++++ .../compute/data/IntVectorBlock.java | 8 ++ .../compute/data/LongArrayBlock.java | 7 + .../compute/data/LongBigArrayBlock.java | 7 + .../elasticsearch/compute/data/LongBlock.java | 5 + .../compute/data/LongLookup.java | 96 ++++++++++++++ .../compute/data/LongVectorBlock.java | 8 ++ .../blockhash/PackedValuesBlockHash.java | 14 +- .../org/elasticsearch/compute/data/Block.java | 34 +++++ .../compute/data/ConstantNullBlock.java | 7 + .../elasticsearch/compute/data/DocBlock.java | 7 + .../compute/data/OrdinalBytesRefBlock.java | 7 + .../compute/data/X-ArrayBlock.java.st | 12 +- .../compute/data/X-BigArrayBlock.java.st | 7 + 
.../compute/data/X-Block.java.st | 5 + .../compute/data/X-Lookup.java.st | 111 ++++++++++++++++ .../compute/data/X-VectorBlock.java.st | 8 ++ .../compute/data/BasicBlockTests.java | 122 ++++++++++++++++++ .../compute/data/BigArrayVectorTests.java | 41 ++++++ .../compute/data/BlockMultiValuedTests.java | 87 +++++++++++++ 38 files changed, 1090 insertions(+), 10 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java create mode 100644 x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 98fd8b0a1aa5e..b4fb7637bc679 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -350,6 +350,33 @@ tasks.named('stringTemplates').configure { it.inputFile = stateInputFile it.outputFile = "org/elasticsearch/compute/aggregation/DoubleState.java" } + // block builders + File lookupInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st") + template { + it.properties = intProperties + it.inputFile = lookupInputFile + it.outputFile = "org/elasticsearch/compute/data/IntLookup.java" + } + template { + it.properties = longProperties + it.inputFile = lookupInputFile + it.outputFile = "org/elasticsearch/compute/data/LongLookup.java" + } + template { + it.properties = doubleProperties + it.inputFile = lookupInputFile + it.outputFile = 
"org/elasticsearch/compute/data/DoubleLookup.java" + } + template { + it.properties = bytesRefProperties + it.inputFile = lookupInputFile + it.outputFile = "org/elasticsearch/compute/data/BytesRefLookup.java" + } + template { + it.properties = booleanProperties + it.inputFile = lookupInputFile + it.outputFile = "org/elasticsearch/compute/data/BooleanLookup.java" + } File arrayStateInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st") template { it.properties = intProperties diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 2ec68d268ae8a..254d56f849768 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public BooleanBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BOOLEAN; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index 51418445713b0..aac728236b136 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public BooleanBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BOOLEAN; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index f365a2ed78610..8ae2984018640 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, Bo @Override BooleanBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override BooleanBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java new file mode 100644 index 0000000000000..f969e164eef68 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation for {@link BooleanBlock}s. + * This class is generated. Do not edit it. + */ +final class BooleanLookup implements ReleasableIterator { + private final BooleanBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private boolean first; + private int valuesInPosition; + + BooleanLookup(BooleanBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public BooleanBlock next() { + try (BooleanBlock.Builder builder = positions.blockFactory().newBooleanBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBoolean(first); + default -> builder.endPositionEntry(); + } + position++; + // TODO what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(BooleanBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getBoolean(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendBoolean(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendBoolean(values.getBoolean(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 70fcfeca94869..013718bb42a7d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public BooleanBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new BooleanLookup(this, positions, targetBlockSize); + } + @Override public BooleanBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 8eaf07b473a3a..c33bd12b74bbd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -116,6 +118,11 @@ public BytesRefBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.BYTES_REF; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index a6c75dbc1122f..d3afcfd6dde4d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -12,6 +12,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -42,6 +44,9 @@ public sealed interface BytesRefBlock extends Block permits BytesRefArrayBlock, @Override BytesRefBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override BytesRefBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java new file mode 100644 index 0000000000000..3ec62902fe048 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefLookup.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation for {@link BytesRefBlock}s. + * This class is generated. Do not edit it. + */ +final class BytesRefLookup implements ReleasableIterator { + private final BytesRef firstScratch = new BytesRef(); + private final BytesRef valueScratch = new BytesRef(); + private final BytesRefBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private BytesRef first; + private int valuesInPosition; + + BytesRefLookup(BytesRefBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public BytesRefBlock next() { + try (BytesRefBlock.Builder builder = positions.blockFactory().newBytesRefBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendBytesRef(first); + default -> builder.endPositionEntry(); + } + position++; + // TODO what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(BytesRefBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getBytesRef(i, firstScratch); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendBytesRef(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendBytesRef(values.getBytesRef(i, valueScratch)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 8c8c3b59ff758..9838fde8a0ffe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -50,6 +52,12 @@ public BytesRefBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override public BytesRefBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index d545fca4fca8d..4d923e4ca77c8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public DoubleBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.DOUBLE; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 5698f40b530b7..203856f88c4ce 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public DoubleBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.DOUBLE; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index a682c2cba019e..95f318703df62 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, Doub @Override DoubleBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override DoubleBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java new file mode 100644 index 0000000000000..bcb8a414f7c57 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation for {@link DoubleBlock}s. + * This class is generated. Do not edit it. + */ +final class DoubleLookup implements ReleasableIterator { + private final DoubleBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private double first; + private int valuesInPosition; + + DoubleLookup(DoubleBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public DoubleBlock next() { + try (DoubleBlock.Builder builder = positions.blockFactory().newDoubleBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendDouble(first); + default -> builder.endPositionEntry(); + } + position++; + // TODO what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(DoubleBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getDouble(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendDouble(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendDouble(values.getDouble(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index eec6675e93ae7..e76a4e0c5fdee 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public DoubleBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new DoubleLookup(this, positions, targetBlockSize); + } + @Override public DoubleBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 41c9d3b84485d..6231e8f9c5a10 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public IntBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new IntLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.INT; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index 66c0b15415418..a1e84db8c4f27 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public IntBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new IntLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.INT; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index e9d606b51c6a1..21d40170151a5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorB @Override IntBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override IntBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java new file mode 100644 index 0000000000000..b7ea15cd9d818 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation for {@link IntBlock}s. + * This class is generated. Do not edit it. + */ +final class IntLookup implements ReleasableIterator { + private final IntBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private int first; + private int valuesInPosition; + + IntLookup(IntBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public IntBlock next() { + try (IntBlock.Builder builder = positions.blockFactory().newIntBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendInt(first); + default -> builder.endPositionEntry(); + } + position++; + // TODO what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(IntBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getInt(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendInt(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendInt(values.getInt(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 39f8426a8da3a..70bcf6919bea6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public IntBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new IntLookup(this, positions, targetBlockSize); + } + @Override public IntBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 56370f718bae0..d8357e5d367cc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -112,6 +114,11 @@ public LongBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new LongLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.LONG; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index e3b17cc7be5d4..0ccd4ab368659 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -9,7 +9,9 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public LongBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new LongLookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.LONG; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 3e1c5fcfaac95..5a11ee8e2a6e3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -11,6 +11,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -38,6 +40,9 @@ public sealed interface LongBlock extends Block permits LongArrayBlock, LongVect @Override LongBlock filter(int... positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override LongBlock expand(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java new file mode 100644 index 0000000000000..ca1b06d70b1d1 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongLookup.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link LongBlock}s. + * This class is generated. Do not edit it. + */ +final class LongLookup implements ReleasableIterator { + private final LongBlock values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private long first; + private int valuesInPosition; + + LongLookup(LongBlock values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public LongBlock next() { + try (LongBlock.Builder builder = positions.blockFactory().newLongBlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendLong(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy(LongBlock.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { + first = values.getLong(i); + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.appendLong(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } + builder.appendLong(values.getLong(i)); + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index b573e025c0be1..b6f1e8e77505d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -49,6 +51,12 @@ public LongBlock filter(int... 
positions) { return vector.filter(positions).asBlock(); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new LongLookup(this, positions, targetBlockSize); + } + @Override public LongBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java index 769155db5ecfa..809c433a000a7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java @@ -59,7 +59,6 @@ */ final class PackedValuesBlockHash extends BlockHash { static final int DEFAULT_BATCH_SIZE = Math.toIntExact(ByteSizeValue.ofKb(10).getBytes()); - private static final long MAX_LOOKUP = 100_000; private final int emitBatchSize; private final BytesRefHash bytesRefHash; @@ -183,14 +182,14 @@ public ReleasableIterator lookup(Page page, ByteSizeValue targetBlockS class LookupWork implements ReleasableIterator { private final Group[] groups; - private final long targetBytesSize; + private final long targetByteSize; private final int positionCount; private int position; - LookupWork(Page page, long targetBytesSize, int batchSize) { + LookupWork(Page page, long targetByteSize, int batchSize) { this.groups = specs.stream().map(s -> new Group(s, page, batchSize)).toArray(Group[]::new); this.positionCount = page.getPositionCount(); - this.targetBytesSize = targetBytesSize; + this.targetByteSize = targetByteSize; } @Override @@ -200,9 +199,10 @@ public boolean hasNext() { @Override public IntBlock next() { - int size = Math.toIntExact(Math.min(Integer.MAX_VALUE, targetBytesSize / Integer.BYTES / 2)); + int size = 
Math.toIntExact(Math.min(Integer.MAX_VALUE, targetByteSize / Integer.BYTES / 2)); try (IntBlock.Builder ords = blockFactory.newIntBlockBuilder(size)) { - while (position < positionCount && ords.estimatedBytes() < targetBytesSize) { + while (position < positionCount && ords.estimatedBytes() < targetByteSize) { + // TODO a test where targetByteSize is very small should still make a few rows. boolean singleEntry = startPosition(groups); if (singleEntry) { lookupSingleEntry(ords); @@ -247,7 +247,7 @@ private void lookupMultipleEntries(IntBlock.Builder ords) { } ords.appendInt(Math.toIntExact(found)); count++; - if (count > MAX_LOOKUP) { + if (count > Block.MAX_LOOKUP) { // TODO replace this with a warning and break throw new IllegalArgumentException("Found a single entry with " + count + " entries"); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 709ad4165170d..ed7ee93c99325 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -10,8 +10,10 @@ import org.apache.lucene.util.Accountable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import org.elasticsearch.index.mapper.BlockLoader; @@ -36,6 +38,11 @@ * the same block at the same time. */ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, RefCounted, Releasable { + /** + * The maximum number of values that can be added to one position via lookup. + * TODO maybe make this everywhere? 
+ */ + long MAX_LOOKUP = 100_000; /** * {@return an efficient dense single-value view of this block}. @@ -114,6 +121,33 @@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R */ Block filter(int... positions); + /** + * Builds an Iterator of new {@link Block}s with the same {@link #elementType} + * as this Block whose values are copied from positions in this Block. It has the + * same number of {@link #getPositionCount() positions} as the {@code positions} + * parameter. + *

+ * For example, if this block contained {@code [a, b, [b, c]]} + * and were called with the block {@code [0, 1, 1, [1, 2]]} then the + * result would be {@code [a, b, b, [b, b, c]]} + *

+ *

+ * This process produces {@code count(this) * count(positions)} values per + * positions which could be quite large. Instead of returning a single + * Block, this returns an Iterator of Blocks containing all of the promised + * values. + *

+ *

+ * The returned {@link ReleasableIterator} may retain a reference to {@link Block}s + * inside the {@link Page}. Close it to release those references. + *

+ *

+ * The returned blocks are built using the same {@link BlockFactory} as was used to + * build the {@code positions} parameter. + *

+ */ + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + /** * How are multivalued fields ordered? * Some operators can enable its optimization when mv_values are sorted ascending or de-duplicated. diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index bdeb5334e0da7..1baa4d2283b25 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -11,6 +11,8 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import java.io.IOException; import java.util.Objects; @@ -75,6 +77,11 @@ public ConstantNullBlock filter(int... 
positions) { return (ConstantNullBlock) blockFactory().newConstantNullBlock(positions.length); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return ReleasableIterator.single((ConstantNullBlock) positions.blockFactory().newConstantNullBlock(positions.getPositionCount())); + } + public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( Block.class, "ConstantNullBlock", diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index f454abe7d2cfe..e5a0d934aa01a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.data; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -48,6 +50,11 @@ public Block filter(int... 
positions) { return new DocBlock(asVector().filter(positions)); } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + throw new UnsupportedOperationException(); + } + @Override public DocBlock expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java index 64e3faca1f517..41ab5256e9109 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/OrdinalBytesRefBlock.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -118,6 +120,11 @@ public BytesRefBlock filter(int... 
positions) { } } + @Override + public ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new BytesRefLookup(this, positions, targetBlockSize); + } + @Override protected void closeInternal() { Releasables.close(ordinals, bytes); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 9b153317c8a0e..1de2fa239e61e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -11,15 +11,16 @@ $if(BytesRef)$ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BytesRefArray; -import org.elasticsearch.core.Releasables; - $else$ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +$endif$ +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; -$endif$ import java.io.IOException; import java.util.BitSet; @@ -132,6 +133,11 @@ $endif$ } } + @Override + public ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.$TYPE$; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index 53f0bb09640c5..66bdcc5d39fb0 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -9,7 +9,9 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.$Array$; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import java.io.IOException; @@ -113,6 +115,11 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty } } + @Override + public ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public ElementType elementType() { return ElementType.$TYPE$; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 331a5713fa3d1..b9d3dfc1f16ff 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -14,6 +14,8 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.index.mapper.BlockLoader; import java.io.IOException; @@ -58,6 +60,9 @@ $endif$ @Override $Type$Block filter(int... 
positions); + @Override + ReleasableIterator lookup(IntBlock positions, ByteSizeValue targetBlockSize); + @Override $Type$Block expand(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st new file mode 100644 index 0000000000000..668752fe3f59f --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Lookup.java.st @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +$if(BytesRef)$ +import org.apache.lucene.util.BytesRef; +$endif$ +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; + +/** + * Generic {@link Block#lookup} implementation {@link $Type$Block}s. + * This class is generated. Do not edit it. 
+ */ +final class $Type$Lookup implements ReleasableIterator<$Type$Block> { +$if(BytesRef)$ + private final BytesRef firstScratch = new BytesRef(); + private final BytesRef valueScratch = new BytesRef(); +$endif$ + private final $Type$Block values; + private final IntBlock positions; + private final long targetByteSize; + private int position; + + private $type$ first; + private int valuesInPosition; + + $Type$Lookup($Type$Block values, IntBlock positions, ByteSizeValue targetBlockSize) { + values.incRef(); + positions.incRef(); + this.values = values; + this.positions = positions; + this.targetByteSize = targetBlockSize.getBytes(); + } + + @Override + public boolean hasNext() { + return position < positions.getPositionCount(); + } + + @Override + public $Type$Block next() { + try ($Type$Block.Builder builder = positions.blockFactory().new$Type$BlockBuilder(positions.getTotalValueCount())) { + int count = 0; + while (position < positions.getPositionCount()) { + int start = positions.getFirstValueIndex(position); + int end = start + positions.getValueCount(position); + valuesInPosition = 0; + for (int i = start; i < end; i++) { + copy(builder, positions.getInt(i)); + } + switch (valuesInPosition) { + case 0 -> builder.appendNull(); + case 1 -> builder.append$Type$(first); + default -> builder.endPositionEntry(); + } + position++; + // TOOD what if the estimate is super huge? should we break even with less than MIN_TARGET? 
+ if (++count > Operator.MIN_TARGET_PAGE_SIZE && builder.estimatedBytes() < targetByteSize) { + break; + } + } + return builder.build(); + } + } + + private void copy($Type$Block.Builder builder, int valuePosition) { + if (valuePosition >= values.getPositionCount()) { + return; + } + int start = values.getFirstValueIndex(valuePosition); + int end = start + values.getValueCount(valuePosition); + for (int i = start; i < end; i++) { + if (valuesInPosition == 0) { +$if(BytesRef)$ + first = values.get$Type$(i, firstScratch); +$else$ + first = values.get$Type$(i); +$endif$ + valuesInPosition++; + continue; + } + if (valuesInPosition == 1) { + builder.beginPositionEntry(); + builder.append$Type$(first); + } + if (valuesInPosition > Block.MAX_LOOKUP) { + // TODO replace this with a warning and break + throw new IllegalArgumentException("Found a single entry with " + valuesInPosition + " entries"); + } +$if(BytesRef)$ + builder.append$Type$(values.get$Type$(i, valueScratch)); +$else$ + builder.append$Type$(values.get$Type$(i)); +$endif$ + valuesInPosition++; + } + } + + @Override + public void close() { + Releasables.close(values, positions); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 8df5cea4c883b..274457a4d5bd8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -10,6 +10,8 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; /** @@ -57,6 +59,12 @@ $endif$ return vector.filter(positions).asBlock(); } + @Override + public 
ReleasableIterator<$Type$Block> lookup(IntBlock positions, ByteSizeValue targetBlockSize) { + // TODO optimizations + return new $Type$Lookup(this, positions, targetBlockSize); + } + @Override public $Type$Block expand() { incRef(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index ee505704f762b..6852cd52862b2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.ReleasableIterator; import org.elasticsearch.core.Releasables; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; @@ -38,6 +39,7 @@ import java.util.stream.IntStream; import java.util.stream.LongStream; +import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.containsString; @@ -47,6 +49,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -192,6 +195,11 @@ public void testIntBlock() { int pos = block.getInt(randomPosition(positionCount)); assertThat(pos, is(block.getInt(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1), List.of(2), List.of(1, 2))); + } + 
assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(1)) { IntBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -237,6 +245,15 @@ public void testConstantIntBlock() { assertThat(value, is(block.getInt(randomPosition(positionCount)))); assertThat(block.isNull(randomPosition(positionCount)), is(false)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -261,6 +278,11 @@ public void testLongBlock() { int pos = (int) block.getLong(randomPosition(positionCount)); assertThat((long) pos, is(block.getLong(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1L), List.of(2L), List.of(1L, 2L))); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (LongBlock.Builder blockBuilder = blockFactory.newLongBlockBuilder(1)) { LongBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -303,6 +325,15 @@ public void testConstantLongBlock() { assertThat(value, is(block.getLong(randomPosition(positionCount)))); assertThat(block.isNull(randomPosition(positionCount)), is(false)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), 
singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -328,6 +359,11 @@ public void testDoubleBlock() { int pos = (int) block.getDouble(randomPosition(positionCount)); assertThat((double) pos, is(block.getDouble(pos))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup(block, positions(blockFactory, 1, 2, new int[] { 1, 2 }), List.of(List.of(1d), List.of(2d), List.of(1d, 2d))); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (DoubleBlock.Builder blockBuilder = blockFactory.newDoubleBlockBuilder(1)) { DoubleBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -371,6 +407,15 @@ public void testConstantDoubleBlock() { assertThat(value, is(block.getDouble(positionCount - 1))); assertThat(value, is(block.getDouble(randomPosition(positionCount)))); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -409,6 +454,15 @@ private void testBytesRefBlock(Supplier byteArraySupplier, boolean cho assertions.accept(bytes); } assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (BytesRefBlock.Builder blockBuilder = blockFactory.newBytesRefBlockBuilder(1)) { BytesRefBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ 
-511,6 +565,15 @@ public void testConstantBytesRefBlock() { bytes = block.getBytesRef(randomPosition(positionCount), bytes); assertThat(bytes, is(value)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -537,6 +600,15 @@ public void testBooleanBlock() { assertThat(block.getBoolean(0), is(true)); assertThat(block.getBoolean(positionCount - 1), is((positionCount - 1) % 10 == 0)); assertSingleValueDenseBlock(block); + if (positionCount > 1) { + assertLookup( + block, + positions(blockFactory, 1, 0, new int[] { 1, 0 }), + List.of(List.of(false), List.of(true), List.of(false, true)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); try (BooleanBlock.Builder blockBuilder = blockFactory.newBooleanBlockBuilder(1)) { BooleanBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); @@ -577,6 +649,15 @@ public void testConstantBooleanBlock() { assertThat(block.getBoolean(positionCount - 1), is(value)); assertThat(block.getBoolean(randomPosition(positionCount)), is(value)); assertSingleValueDenseBlock(block); + if (positionCount > 2) { + assertLookup( + block, + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(value), List.of(value), List.of(value, value)) + ); + } + assertLookup(block, positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, block); releaseAndAssertBreaker(block); } } @@ -1383,4 +1464,45 @@ private Block randomBigArrayBlock() { } }; } + + static IntBlock positions(BlockFactory blockFactory, Object... 
positions) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(positions.length)) { + for (Object p : positions) { + if (p instanceof int[] mv) { + builder.beginPositionEntry(); + for (int v : mv) { + builder.appendInt(v); + } + builder.endPositionEntry(); + continue; + } + if (p instanceof Integer v) { + builder.appendInt(v); + continue; + } + throw new IllegalArgumentException("invalid position: " + p + "(" + p.getClass().getName() + ")"); + } + return builder.build(); + } + } + + static void assertEmptyLookup(BlockFactory blockFactory, Block block) { + try ( + IntBlock positions = positions(blockFactory); + ReleasableIterator lookup = block.lookup(positions, ByteSizeValue.ofKb(100)) + ) { + assertThat(lookup.hasNext(), equalTo(false)); + } + } + + static void assertLookup(Block block, IntBlock positions, List> expected) { + try (positions; ReleasableIterator lookup = block.lookup(positions, ByteSizeValue.ofKb(100))) { + assertThat(lookup.hasNext(), equalTo(true)); + try (Block b = lookup.next()) { + assertThat(valuesAtPositions(b, 0, b.getPositionCount()), equalTo(expected)); + assertThat(b.blockFactory(), sameInstance(positions.blockFactory())); + } + assertThat(lookup.hasNext(), equalTo(false)); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java index 74d7e3e142d04..067cff2feba08 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java @@ -17,8 +17,13 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; +import java.util.List; import java.util.stream.IntStream; +import static java.util.Collections.singletonList; +import static org.elasticsearch.compute.data.BasicBlockTests.assertEmptyLookup; 
+import static org.elasticsearch.compute.data.BasicBlockTests.assertLookup; +import static org.elasticsearch.compute.data.BasicBlockTests.positions; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -53,6 +58,15 @@ public void testBoolean() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("BooleanBigArrayVector[positions=" + positionCount)); } @@ -84,6 +98,15 @@ public void testInt() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("IntBigArrayVector[positions=" + positionCount)); } @@ -115,6 +138,15 @@ public void testLong() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + 
assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("LongBigArrayVector[positions=" + positionCount)); } @@ -146,6 +178,15 @@ public void testDouble() throws IOException { } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); + if (positionCount > 1) { + assertLookup( + vector.asBlock(), + positions(blockFactory, 1, 2, new int[] { 1, 2 }), + List.of(List.of(values[1]), List.of(values[2]), List.of(values[1], values[2])) + ); + } + assertLookup(vector.asBlock(), positions(blockFactory, positionCount + 1000), singletonList(null)); + assertEmptyLookup(blockFactory, vector.asBlock()); assertSerialization(block); assertThat(vector.toString(), containsString("DoubleBigArrayVector[positions=" + positionCount)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java index 1b0e61cea8135..4579eb688d95e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockMultiValuedTests.java @@ -17,15 +17,20 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.Operator; +import org.elasticsearch.core.ReleasableIterator; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.junit.After; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.function.IntUnaryOperator; import java.util.stream.IntStream; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class BlockMultiValuedTests extends ESTestCase { 
@ParametersFactory @@ -104,6 +109,18 @@ public void testFilteredJumbledSubsetThenExpanded() { assertFilteredThenExpanded(false, true); } + public void testLookupFromSingleOnePage() { + assertLookup(ByteSizeValue.ofMb(100), between(1, 32), p -> 1); + } + + public void testLookupFromManyOnePage() { + assertLookup(ByteSizeValue.ofMb(100), between(1, 32), p -> between(1, 5)); + } + + public void testLookupFromSingleManyPages() { + assertLookup(ByteSizeValue.ofBytes(1), between(1, 32), p -> 1); + } + private void assertFiltered(boolean all, boolean shuffled) { int positionCount = randomIntBetween(1, 16 * 1024); var b = BasicBlockTests.randomBlock(blockFactory(), elementType, positionCount, nullAllowed, 0, 10, 0, 0); @@ -212,4 +229,74 @@ public void allBreakersEmpty() throws Exception { assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); } } + + private void assertLookup(ByteSizeValue targetBytes, int positionsToCopy, IntUnaryOperator positionsPerPosition) { + BlockFactory positionsFactory = blockFactory(); + int positionCount = randomIntBetween(100, 16 * 1024); + var b = BasicBlockTests.randomBlock(blockFactory(), elementType, positionCount, nullAllowed, 0, 100, 0, 0); + try (IntBlock.Builder builder = positionsFactory.newIntBlockBuilder(positionsToCopy);) { + for (int p = 0; p < positionsToCopy; p++) { + int max = positionsPerPosition.applyAsInt(p); + switch (max) { + case 0 -> builder.appendNull(); + case 1 -> builder.appendInt(between(0, positionCount + 100)); + default -> { + builder.beginPositionEntry(); + for (int v = 0; v < max; v++) { + builder.appendInt(between(0, positionCount + 100)); + } + builder.endPositionEntry(); + } + } + } + Block copy = null; + int positionOffset = 0; + try ( + IntBlock positions = builder.build(); + ReleasableIterator lookup = b.block().lookup(positions, targetBytes); + ) { + for (int p = 0; p < positions.getPositionCount(); p++) { + if (copy == null || p - positionOffset == copy.getPositionCount()) 
{ + if (copy != null) { + positionOffset += copy.getPositionCount(); + copy.close(); + } + assertThat(lookup.hasNext(), equalTo(true)); + copy = lookup.next(); + if (positions.getPositionCount() - positionOffset < Operator.MIN_TARGET_PAGE_SIZE) { + assertThat(copy.getPositionCount(), equalTo(positions.getPositionCount() - positionOffset)); + } else { + assertThat(copy.getPositionCount(), greaterThanOrEqualTo(Operator.MIN_TARGET_PAGE_SIZE)); + } + } + List expected = new ArrayList<>(); + int start = positions.getFirstValueIndex(p); + int end = start + positions.getValueCount(p); + for (int i = start; i < end; i++) { + int toCopy = positions.getInt(i); + if (toCopy < b.block().getPositionCount()) { + List v = BasicBlockTests.valuesAtPositions(b.block(), toCopy, toCopy + 1).get(0); + if (v != null) { + expected.addAll(v); + } + } + } + if (expected.isEmpty()) { + assertThat(copy.isNull(p - positionOffset), equalTo(true)); + } else { + assertThat(copy.isNull(p - positionOffset), equalTo(false)); + assertThat( + BasicBlockTests.valuesAtPositions(copy, p - positionOffset, p + 1 - positionOffset).get(0), + equalTo(expected) + ); + } + } + assertThat(lookup.hasNext(), equalTo(false)); + } finally { + Releasables.close(copy); + } + } finally { + b.block().close(); + } + } } From f2fe71b9387ef90d875d8e2255c9111c1e1219bc Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 29 Apr 2024 08:54:04 +0200 Subject: [PATCH 002/244] Optimise time_series aggregation for single value fields (#107990) Time series dimensions are by definition single-value fields. Therefore let's take advantage of that property in time-series aggregation and stop trying to iterate over dimension doc values. This change might bring better performance. 
--- docs/changelog/107990.yaml | 5 +++++ .../bucket/timeseries/TimeSeriesAggregator.java | 10 ++++------ 2 files changed, 9 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/107990.yaml diff --git a/docs/changelog/107990.yaml b/docs/changelog/107990.yaml new file mode 100644 index 0000000000000..80cb96aca4426 --- /dev/null +++ b/docs/changelog/107990.yaml @@ -0,0 +1,5 @@ +pr: 107990 +summary: Optimise `time_series` aggregation for single value fields +area: TSDB +type: enhancement +issues: [] diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java index 255a78408eb6d..53142f6cdf601 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregator.java @@ -122,18 +122,16 @@ protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCt SortedNumericDocValues docValues = numericVS.longValues(aggCtx.getLeafReaderContext()); dimensionConsumers.put(entry.getKey(), (docId, tsidBuilder) -> { if (docValues.advanceExact(docId)) { - for (int i = 0; i < docValues.docValueCount(); i++) { - tsidBuilder.addLong(fieldName, docValues.nextValue()); - } + assert docValues.docValueCount() == 1 : "Dimension field cannot be a multi-valued field"; + tsidBuilder.addLong(fieldName, docValues.nextValue()); } }); } else { SortedBinaryDocValues docValues = entry.getValue().bytesValues(aggCtx.getLeafReaderContext()); dimensionConsumers.put(entry.getKey(), (docId, tsidBuilder) -> { if (docValues.advanceExact(docId)) { - for (int i = 0; i < docValues.docValueCount(); i++) { - tsidBuilder.addString(fieldName, docValues.nextValue()); - } + assert docValues.docValueCount() == 1 : "Dimension field cannot be a multi-valued 
field"; + tsidBuilder.addString(fieldName, docValues.nextValue()); } }); } From 30d31bffb2f985f8920d1e64ac799da896512a07 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Apr 2024 08:03:32 +0100 Subject: [PATCH 003/244] Introduce `RestUtils#getMasterNodeTimeout` (#107986) Many APIs accept a `?master_timeout` parameter, but reading this parameter requires a little unnecessary boilerplate to specify the literal parameter name and default value. Moreover, today's convention is to construct a `MasterNodeRequest` and then read the default master timeout from the freshly-created request. In practice this results in a default of 30s, but we specify in the docs that this default is _always_ 30s, and in principle one could create a transport request with a different initial value which would deviate from the documented behaviour. This commit introduces a utility method for reading this parameter in a fashion which is completely consistent with the documented behaviour. Relates #107984 --- .../RestDataStreamLifecycleStatsAction.java | 3 ++- .../RestExplainDataStreamLifecycleAction.java | 3 ++- .../RestPutDataStreamLifecycleAction.java | 3 ++- .../rest/RestModifyDataStreamsAction.java | 3 ++- .../http/DanglingIndicesRestIT.java | 5 ++-- .../cluster/RestClusterStateActionIT.java | 4 +++- .../org/elasticsearch/rest/RestUtils.java | 24 +++++++++++++++++++ .../RestAddVotingConfigExclusionAction.java | 3 ++- .../cluster/RestCleanupRepositoryAction.java | 3 ++- ...RestClearVotingConfigExclusionsAction.java | 3 ++- .../cluster/RestCloneSnapshotAction.java | 3 ++- .../cluster/RestClusterGetSettingsAction.java | 3 ++- .../cluster/RestClusterHealthAction.java | 3 ++- .../cluster/RestClusterRerouteAction.java | 3 ++- .../admin/cluster/RestClusterStateAction.java | 3 ++- .../RestClusterUpdateSettingsAction.java | 5 ++-- .../cluster/RestCreateSnapshotAction.java | 3 ++- .../cluster/RestDeleteDesiredNodesAction.java | 4 +++- .../cluster/RestDeleteRepositoryAction.java | 3 ++- 
.../cluster/RestDeleteSnapshotAction.java | 3 ++- .../cluster/RestDeleteStoredScriptAction.java | 3 ++- .../cluster/RestGetDesiredNodesAction.java | 4 +++- .../RestGetFeatureUpgradeStatusAction.java | 4 +++- .../cluster/RestGetRepositoriesAction.java | 3 ++- .../admin/cluster/RestGetSnapshotsAction.java | 3 ++- .../cluster/RestGetStoredScriptAction.java | 3 ++- .../RestPendingClusterTasksAction.java | 3 ++- .../cluster/RestPostFeatureUpgradeAction.java | 4 +++- .../RestPrevalidateNodeRemovalAction.java | 3 ++- .../cluster/RestPutRepositoryAction.java | 3 ++- .../cluster/RestPutStoredScriptAction.java | 3 ++- .../cluster/RestRestoreSnapshotAction.java | 3 ++- .../cluster/RestSnapshotsStatusAction.java | 3 ++- .../RestSnapshottableFeaturesAction.java | 3 ++- .../cluster/RestUpdateDesiredNodesAction.java | 4 +++- .../cluster/RestVerifyRepositoryAction.java | 3 ++- .../RestDeleteDanglingIndexAction.java | 3 ++- .../RestImportDanglingIndexAction.java | 3 ++- .../indices/RestAddIndexBlockAction.java | 3 ++- .../admin/indices/RestCloseIndexAction.java | 3 ++- .../admin/indices/RestCreateIndexAction.java | 5 ++-- .../RestDeleteComponentTemplateAction.java | 3 ++- ...stDeleteComposableIndexTemplateAction.java | 3 ++- .../admin/indices/RestDeleteIndexAction.java | 3 ++- .../RestDeleteIndexTemplateAction.java | 3 ++- .../RestGetComponentTemplateAction.java | 3 ++- .../RestGetComposableIndexTemplateAction.java | 3 ++- .../indices/RestGetIndexTemplateAction.java | 3 ++- .../admin/indices/RestGetIndicesAction.java | 3 ++- .../admin/indices/RestGetMappingAction.java | 3 ++- .../admin/indices/RestGetSettingsAction.java | 3 ++- .../indices/RestIndexDeleteAliasesAction.java | 3 ++- .../indices/RestIndexPutAliasAction.java | 3 ++- .../indices/RestIndicesAliasesAction.java | 3 ++- .../admin/indices/RestOpenIndexAction.java | 3 ++- .../RestPutComponentTemplateAction.java | 3 ++- .../RestPutComposableIndexTemplateAction.java | 3 ++- .../indices/RestPutIndexTemplateAction.java | 3 ++- 
.../admin/indices/RestPutMappingAction.java | 3 ++- .../admin/indices/RestResizeHandler.java | 3 ++- .../indices/RestRolloverIndexAction.java | 3 ++- .../RestSimulateIndexTemplateAction.java | 5 ++-- .../indices/RestSimulateTemplateAction.java | 3 ++- .../indices/RestUpdateSettingsAction.java | 3 ++- .../rest/action/cat/RestAllocationAction.java | 3 ++- .../cat/RestCatComponentTemplateAction.java | 3 ++- .../rest/action/cat/RestIndicesAction.java | 4 ++-- .../rest/action/cat/RestMasterAction.java | 3 ++- .../rest/action/cat/RestNodeAttrsAction.java | 3 ++- .../rest/action/cat/RestNodesAction.java | 3 ++- .../cat/RestPendingClusterTasksAction.java | 3 ++- .../rest/action/cat/RestPluginsAction.java | 3 ++- .../action/cat/RestRepositoriesAction.java | 3 ++- .../rest/action/cat/RestSegmentsAction.java | 3 ++- .../rest/action/cat/RestShardsAction.java | 3 ++- .../rest/action/cat/RestSnapshotAction.java | 3 ++- .../rest/action/cat/RestTemplatesAction.java | 7 +++--- .../rest/action/cat/RestThreadPoolAction.java | 3 ++- .../ingest/RestDeletePipelineAction.java | 3 ++- .../action/ingest/RestGetPipelineAction.java | 3 ++- .../action/ingest/RestPutPipelineAction.java | 3 ++- .../reroute/ClusterRerouteRequestTests.java | 3 ++- .../elasticsearch/rest/RestUtilsTests.java | 17 +++++++++++++ ...stAddVotingConfigExclusionActionTests.java | 5 ++-- ...ClearVotingConfigExclusionActionTests.java | 3 ++- .../cluster/RestClusterHealthActionTests.java | 3 ++- .../xpack/ccr/rest/RestCcrStatsAction.java | 3 ++- .../RestDeleteAutoFollowPatternAction.java | 3 ++- .../xpack/ccr/rest/RestFollowInfoAction.java | 3 ++- .../rest/RestGetAutoFollowPatternAction.java | 3 ++- .../RestPauseAutoFollowPatternAction.java | 3 ++- .../xpack/ccr/rest/RestPauseFollowAction.java | 3 ++- .../rest/RestPutAutoFollowPatternAction.java | 3 ++- .../xpack/ccr/rest/RestPutFollowAction.java | 3 ++- .../RestResumeAutoFollowPatternAction.java | 3 ++- .../ccr/rest/RestResumeFollowAction.java | 3 ++- 
.../xpack/ccr/rest/RestUnfollowAction.java | 3 ++- .../license/RestDeleteLicenseAction.java | 3 ++- .../license/RestPostStartBasicLicense.java | 3 ++- .../license/RestPutLicenseAction.java | 3 ++- .../rest/action/RestXPackUsageAction.java | 4 ++-- .../rest/action/RestFreezeIndexAction.java | 5 ++-- .../ilm/action/RestDeleteLifecycleAction.java | 3 ++- .../action/RestExplainLifecycleAction.java | 3 ++- .../ilm/action/RestGetLifecycleAction.java | 3 ++- .../xpack/ilm/action/RestGetStatusAction.java | 3 ++- .../ilm/action/RestMoveToStepAction.java | 3 ++- .../ilm/action/RestPutLifecycleAction.java | 3 ++- .../RestRemoveIndexLifecyclePolicyAction.java | 3 ++- .../xpack/ilm/action/RestRetryAction.java | 3 ++- .../xpack/ilm/action/RestStartILMAction.java | 3 ++- .../xpack/ilm/action/RestStopAction.java | 3 ++- .../xpack/ml/rest/RestMlMemoryAction.java | 3 ++- .../ml/rest/RestSetUpgradeModeAction.java | 3 ++- .../datafeeds/RestDeleteDatafeedAction.java | 3 ++- .../rest/datafeeds/RestPutDatafeedAction.java | 3 ++- .../datafeeds/RestUpdateDatafeedAction.java | 3 ++- .../rest/filter/RestDeleteFilterAction.java | 3 ++- ...estUpdateTrainedModelDeploymentAction.java | 3 ++- .../ml/rest/job/RestDeleteJobAction.java | 3 ++- .../ml/rest/job/RestPostJobUpdateAction.java | 3 ++- .../xpack/ml/rest/job/RestPutJobAction.java | 3 ++- .../xpack/ml/rest/job/RestResetJobAction.java | 3 ++- .../RestRevertModelSnapshotAction.java | 3 ++- .../http/PublishableHttpResource.java | 3 ++- ...stractPublishableHttpResourceTestCase.java | 9 +++---- .../exporter/http/HttpExporterTests.java | 6 ++++- .../profiling/rest/RestGetStatusAction.java | 3 ++- .../RestMountSearchableSnapshotAction.java | 4 ++-- .../RestDeleteShutdownNodeAction.java | 4 +++- .../shutdown/RestPutShutdownNodeAction.java | 4 +++- .../RestDeleteSnapshotLifecycleAction.java | 3 ++- .../RestExecuteSnapshotLifecycleAction.java | 3 ++- .../RestExecuteSnapshotRetentionAction.java | 3 ++- .../slm/action/RestGetSLMStatusAction.java | 3 ++- 
.../RestGetSnapshotLifecycleAction.java | 3 ++- .../RestGetSnapshotLifecycleStatsAction.java | 3 ++- .../RestPutSnapshotLifecycleAction.java | 3 ++- .../xpack/slm/action/RestStartSLMAction.java | 3 ++- .../xpack/slm/action/RestStopSLMAction.java | 3 ++- .../rest/action/RestWatchServiceAction.java | 3 ++- .../plugin/freeze/FreezeIndexPlugin.java | 3 ++- 142 files changed, 340 insertions(+), 157 deletions(-) diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java index e734c913fe9e8..a10a955b33975 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDataStreamLifecycleStatsAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { GetDataStreamLifecycleStatsAction.Request request = new GetDataStreamLifecycleStatsAction.Request(); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( GetDataStreamLifecycleStatsAction.INSTANCE, request, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index 
522ce12d834a8..048ef0bab8e0c 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestExplainDataStreamLifecycleAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ExplainDataStreamLifecycleAction.Request explainRequest = new ExplainDataStreamLifecycleAction.Request(indices); explainRequest.includeDefaults(restRequest.paramAsBoolean("include_defaults", false)); explainRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); - explainRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", explainRequest.masterNodeTimeout())); + explainRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( ExplainDataStreamLifecycleAction.INSTANCE, explainRequest, diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java index 70228a16d7a01..736aad08d9212 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestPutDataStreamLifecycleAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) 
public class RestPutDataStreamLifecycleAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli try (XContentParser parser = request.contentParser()) { PutDataStreamLifecycleAction.Request putLifecycleRequest = PutDataStreamLifecycleAction.Request.parseRequest(parser); putLifecycleRequest.indices(Strings.splitStringByCommaToArray(request.param("name"))); - putLifecycleRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + putLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putLifecycleRequest.ackTimeout(request.paramAsTime("timeout", putLifecycleRequest.ackTimeout())); putLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(request, putLifecycleRequest.indicesOptions())); return channel -> client.execute( diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java index 006422e4c04e7..d4d6af4091691 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/rest/RestModifyDataStreamsAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestModifyDataStreamsAction extends BaseRestHandler { @@ -43,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli if (modifyDsRequest.getActions() == null || modifyDsRequest.getActions().isEmpty()) { throw new IllegalArgumentException("no data stream actions specified, at least one must be specified"); } - modifyDsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", 
modifyDsRequest.masterNodeTimeout())); + modifyDsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); modifyDsRequest.ackTimeout(request.paramAsTime("timeout", modifyDsRequest.ackTimeout())); return channel -> client.execute(ModifyDataStreamsAction.INSTANCE, modifyDsRequest, new RestToXContentListener<>(channel)); } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java index f7f46671e2354..eaf439f264ad5 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -33,6 +33,7 @@ import static org.elasticsearch.cluster.metadata.IndexGraveyard.SETTING_MAX_TOMBSTONES; import static org.elasticsearch.indices.IndicesService.WRITE_DANGLING_INDICES_INFO_SETTING; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.test.XContentTestUtils.createJsonMapView; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -111,7 +112,7 @@ public void testDanglingIndicesCanBeImported() throws Exception { importRequest.addParameter("accept_data_loss", "true"); // Ensure this parameter is accepted importRequest.addParameter("timeout", "20s"); - importRequest.addParameter("master_timeout", "20s"); + importRequest.addParameter(REST_MASTER_TIMEOUT_PARAM, "20s"); final Response importResponse = restClient.performRequest(importRequest); assertThat(importResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); @@ -147,7 +148,7 @@ public void testDanglingIndicesCanBeDeleted() throws Exception { deleteRequest.addParameter("accept_data_loss", "true"); // Ensure these parameters is accepted deleteRequest.addParameter("timeout", "20s"); - 
deleteRequest.addParameter("master_timeout", "20s"); + deleteRequest.addParameter(REST_MASTER_TIMEOUT_PARAM, "20s"); final Response deleteResponse = restClient.performRequest(deleteRequest); assertThat(deleteResponse.getStatusLine().getStatusCode(), equalTo(ACCEPTED.getStatus())); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java index ce3439a5800a8..6f5ebcf17686d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateActionIT.java @@ -13,6 +13,8 @@ import java.io.IOException; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; + public class RestClusterStateActionIT extends ESIntegTestCase { @Override @@ -22,7 +24,7 @@ protected boolean addMockHttpTransport() { public void testInfiniteTimeOut() throws IOException { final var request = new Request("GET", "/_cluster/state/none"); - request.addParameter("master_timeout", "-1"); + request.addParameter(REST_MASTER_TIMEOUT_PARAM, "-1"); getRestClient().performRequest(request); } } diff --git a/server/src/main/java/org/elasticsearch/rest/RestUtils.java b/server/src/main/java/org/elasticsearch/rest/RestUtils.java index 4aa82f5e4b7c5..d33fa8ca8cebf 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestUtils.java +++ b/server/src/main/java/org/elasticsearch/rest/RestUtils.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.TimeValue; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; @@ -256,4 +257,27 @@ public static Optional extractTraceId(String traceparent) { return traceparent != null && traceparent.length() >= 55 ? 
Optional.of(traceparent.substring(3, 35)) : Optional.empty(); } + /** + * The name of the common {@code ?master_timeout} query parameter. + */ + public static final String REST_MASTER_TIMEOUT_PARAM = "master_timeout"; + + /** + * The default value for the common {@code ?master_timeout} query parameter. + */ + public static final TimeValue REST_MASTER_TIMEOUT_DEFAULT = TimeValue.timeValueSeconds(30); + + /** + * Extract the {@code ?master_timeout} parameter from the request, imposing the common default of {@code 30s} in case the parameter is + * missing. + * + * @param restRequest The request from which to extract the {@code ?master_timeout} parameter + * @return the timeout from the request, with a default of {@link #REST_MASTER_TIMEOUT_DEFAULT} ({@code 30s}) if the request does not + * specify the parameter + */ + public static TimeValue getMasterNodeTimeout(RestRequest restRequest) { + assert restRequest != null; + return restRequest.paramAsTime(REST_MASTER_TIMEOUT_PARAM, REST_MASTER_TIMEOUT_DEFAULT); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java index 8c8624f1766b1..74ecc85e960b5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestAddVotingConfigExclusionAction extends BaseRestHandler { private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(30L); @@ -82,7 +83,7 @@ static AddVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(fin request.paramAsTime("timeout", DEFAULT_TIMEOUT) ); - 
return resolvedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resolvedRequest.masterNodeTimeout())); + return resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java index 534bb391e9ffe..7ef5b444304cf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cleans up a repository @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String name = request.param("repository"); CleanupRepositoryRequest cleanupRepositoryRequest = new CleanupRepositoryRequest(name); cleanupRepositoryRequest.ackTimeout(request.paramAsTime("timeout", cleanupRepositoryRequest.ackTimeout())); - cleanupRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cleanupRepositoryRequest.masterNodeTimeout())); + cleanupRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java index 69b51afb8d257..ff26648476926 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestClearVotingConfigExclusionsAction extends BaseRestHandler { @@ -45,7 +46,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest request, final No static ClearVotingConfigExclusionsRequest resolveVotingConfigExclusionsRequest(final RestRequest request) { final var resolvedRequest = new ClearVotingConfigExclusionsRequest(); - resolvedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resolvedRequest.masterNodeTimeout())); + resolvedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); resolvedRequest.setTimeout(resolvedRequest.masterNodeTimeout()); resolvedRequest.setWaitForRemoval(request.paramAsBoolean("wait_for_removal", resolvedRequest.getWaitForRemoval())); return resolvedRequest; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index 7785680a3ca8d..b6b63a6774667 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -24,6 +24,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Clones indices from one snapshot into another snapshot in the same repository @@ -51,7 +52,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.param("target_snapshot"), XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) ); - 
cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); + cloneSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java index 7748944306e35..2d2d241c35086 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterGetSettingsAction.java @@ -29,6 +29,7 @@ import java.util.function.Predicate; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterGetSettingsAction extends BaseRestHandler { @@ -64,7 +65,7 @@ public String getName() { private static void setUpRequestParams(MasterNodeReadRequest clusterRequest, RestRequest request) { clusterRequest.local(request.paramAsBoolean("local", clusterRequest.local())); - clusterRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRequest.masterNodeTimeout())); + clusterRequest.masterNodeTimeout(getMasterNodeTimeout(request)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index 6518ccc6e0c94..b849181f62dc0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -31,6 +31,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterHealthAction extends BaseRestHandler { @@ -63,7 +64,7 @@ public static ClusterHealthRequest fromRequest(final RestRequest request) { final ClusterHealthRequest clusterHealthRequest = new ClusterHealthRequest(indices); clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions())); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout())); + clusterHealthRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); String waitForStatus = request.param("wait_for_status"); if (waitForStatus != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index a42882d1144c2..fee4cce3e7c3f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -31,6 +31,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterRerouteAction extends BaseRestHandler { @@ -98,7 +99,7 @@ public static ClusterRerouteRequest createRequest(RestRequest request) throws IO 
clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain())); clusterRerouteRequest.ackTimeout(request.paramAsTime("timeout", clusterRerouteRequest.ackTimeout())); clusterRerouteRequest.setRetryFailed(request.paramAsBoolean("retry_failed", clusterRerouteRequest.isRetryFailed())); - clusterRerouteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterRerouteRequest.masterNodeTimeout())); + clusterRerouteRequest.masterNodeTimeout(getMasterNodeTimeout(request)); request.applyContentParser(parser -> PARSER.parse(parser, clusterRerouteRequest, null)); return clusterRerouteRequest; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java index 72bea78e0103b..b3fb5ccfdfddf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterStateAction.java @@ -42,6 +42,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterStateAction extends BaseRestHandler { @@ -81,7 +82,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterStateRequest.indicesOptions())); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + 
clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (request.hasParam("wait_for_metadata_version")) { clusterStateRequest.waitForMetadataVersion(request.paramAsLong("wait_for_metadata_version", 0)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java index cf22e403e1def..9f34ff5087094 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterUpdateSettingsAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestClusterUpdateSettingsAction extends BaseRestHandler { @@ -45,9 +46,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); clusterUpdateSettingsRequest.ackTimeout(request.paramAsTime("timeout", clusterUpdateSettingsRequest.ackTimeout())); - clusterUpdateSettingsRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", clusterUpdateSettingsRequest.masterNodeTimeout()) - ); + clusterUpdateSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); Map<String, Object> source; try (XContentParser parser = request.contentParser()) { source = parser.map(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index 3e2543230ab06..9491ecfcc1115 100644 ---
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -21,6 +21,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Creates a new snapshot @@ -44,7 +45,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String snapshot = request.param("snapshot"); CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); request.applyContentParser(p -> createSnapshotRequest.source(p.mapOrdered())); - createSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createSnapshotRequest.masterNodeTimeout())); + createSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java index 4ecd784ecd37c..18045828f4401 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteDesiredNodesAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestDeleteDesiredNodesAction extends BaseRestHandler { @Override public String getName() { @@ -32,7 +34,7 @@ public List<Route> routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request,
NodeClient client) throws IOException { final AcknowledgedRequest.Plain deleteDesiredNodesRequest = new AcknowledgedRequest.Plain(); - deleteDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteDesiredNodesRequest.masterNodeTimeout())); + deleteDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( TransportDeleteDesiredNodesAction.TYPE, deleteDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 3dc979f295530..a3ecaf3127c44 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Unregisters a repository @@ -45,7 +46,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String name = request.param("repository"); DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(name); deleteRepositoryRequest.ackTimeout(request.paramAsTime("timeout", deleteRepositoryRequest.ackTimeout())); - deleteRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRepositoryRequest.masterNodeTimeout())); + deleteRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() .deleteRepository( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index bedd44cf9f559..ad7bdc8a2c9b0 100644 --- 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Deletes a snapshot @@ -43,7 +44,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String repository = request.param("repository"); String[] snapshots = Strings.splitStringByCommaToArray(request.param("snapshot")); DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repository, snapshots); - deleteSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteSnapshotRequest.masterNodeTimeout())); + deleteSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java index ca6a9b5fe1f22..46d48b90d283e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteStoredScriptAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteStoredScriptAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client String id = request.param("id"); DeleteStoredScriptRequest deleteStoredScriptRequest = new 
DeleteStoredScriptRequest(id); deleteStoredScriptRequest.ackTimeout(request.paramAsTime("timeout", deleteStoredScriptRequest.ackTimeout())); - deleteStoredScriptRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteStoredScriptRequest.masterNodeTimeout())); + deleteStoredScriptRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().deleteStoredScript(deleteStoredScriptRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java index 869c34896c936..ae375309c301f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredNodesAction.java @@ -17,6 +17,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestGetDesiredNodesAction extends BaseRestHandler { @Override public String getName() { @@ -31,7 +33,7 @@ public List<Route> routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetDesiredNodesAction.Request getDesiredNodesRequest = new GetDesiredNodesAction.Request(); - getDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getDesiredNodesRequest.masterNodeTimeout())); + getDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( GetDesiredNodesAction.INSTANCE, getDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java index 9e53a76cdb131..13fbf3504ebc0 100644 ---
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetFeatureUpgradeStatusAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + /** * Endpoint for getting the system feature upgrade status */ @@ -41,7 +43,7 @@ public boolean allowSystemIndexAccessByDefault() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetFeatureUpgradeStatusRequest req = new GetFeatureUpgradeStatusRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> { client.execute(GetFeatureUpgradeStatusAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index e3a7f2da79a11..c2d4484f1e098 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Returns repository information @@ -51,7 +52,7 @@ public List routes() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(repositories); - 
getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); return channel -> client.admin().cluster().getRepositories(getRepositoriesRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 7482ae7683b4a..45913b9b3ce2a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -25,6 +25,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.snapshots.SnapshotInfo.INCLUDE_REPOSITORY_XCONTENT_PARAM; import static org.elasticsearch.snapshots.SnapshotInfo.INDEX_DETAILS_XCONTENT_PARAM; import static org.elasticsearch.snapshots.SnapshotInfo.INDEX_NAMES_XCONTENT_PARAM; @@ -80,7 +81,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final SortOrder order = SortOrder.fromString(request.param("order", getSnapshotsRequest.order().toString())); getSnapshotsRequest.order(order); getSnapshotsRequest.includeIndexNames(request.paramAsBoolean(INDEX_NAMES_XCONTENT_PARAM, getSnapshotsRequest.includeIndexNames())); - getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() 
.cluster() .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java index b0d5bce981f2a..f827b07ebe96c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetStoredScriptAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetStoredScriptAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { String id = request.param("id"); GetStoredScriptRequest getRequest = new GetStoredScriptRequest(id); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() .getStoredScript(getRequest, new RestToXContentListener<>(channel, GetStoredScriptResponse::status)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index e5745ec89533c..c38f5effc385a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPendingClusterTasksAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java index ba65ab54c440c..cb9af32955abb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPostFeatureUpgradeAction.java @@ -18,6 +18,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + /** * Endpoint for triggering a system feature upgrade */ @@ -41,7 +43,7 @@ public boolean allowSystemIndexAccessByDefault() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final PostFeatureUpgradeRequest req = new PostFeatureUpgradeRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> { client.execute(PostFeatureUpgradeAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff 
--git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java index 01b404e02f0a6..ffcef72d244e9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPrevalidateNodeRemovalAction extends BaseRestHandler { @@ -43,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli .setIds(ids) .setExternalIds(externalIds) .build(); - prevalidationRequest.masterNodeTimeout(request.paramAsTime("master_timeout", prevalidationRequest.masterNodeTimeout())); + prevalidationRequest.masterNodeTimeout(getMasterNodeTimeout(request)); prevalidationRequest.timeout(request.paramAsTime("timeout", prevalidationRequest.timeout())); return channel -> client.execute( PrevalidateNodeRemovalAction.INSTANCE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index 1fe1f8da2e5c8..385fc6c19143a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Registers repositories @@ -50,7 +51,7 @@ public RestChannelConsumer prepareRequest(final RestRequest 
request, final NodeC putRepositoryRequest.source(parser.mapOrdered()); } putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); - putRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRepositoryRequest.masterNodeTimeout())); + putRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRepositoryRequest.ackTimeout(request.paramAsTime("timeout", putRepositoryRequest.ackTimeout())); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java index 984882edcffaa..ce7052d02cb64 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutStoredScriptAction.java @@ -23,6 +23,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutStoredScriptAction extends BaseRestHandler { @@ -51,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client StoredScriptSource source = StoredScriptSource.parse(content, xContentType); PutStoredScriptRequest putRequest = new PutStoredScriptRequest(id, context, content, request.getXContentType(), source); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.ackTimeout(request.paramAsTime("timeout", putRequest.ackTimeout())); return channel -> client.admin().cluster().putStoredScript(putRequest, new RestToXContentListener<>(channel)); } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index b6fba8dd1054c..06524a040db36 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Restores a snapshot @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String repository = request.param("repository"); String snapshot = request.param("snapshot"); RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - restoreSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", restoreSnapshotRequest.masterNodeTimeout())); + restoreSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); request.applyContentParser(p -> restoreSnapshotRequest.source(p.mapOrdered())); return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 3baebb25c4dc2..33b4ba04b826e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import 
static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Returns status of currently running snapshot @@ -53,7 +54,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository).snapshots(snapshots); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); - snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); + snapshotsStatusRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .snapshotsStatus(snapshotsStatusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java index 8c88e51a2c045..b36c4ac56ae71 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestSnapshottableFeaturesAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final GetSnapshottableFeaturesRequest req = new GetSnapshottableFeaturesRequest(); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); 
return restChannel -> { client.execute(SnapshottableFeaturesAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); }; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index 07c54fd258845..38b191ba9f006 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -24,6 +24,8 @@ import java.util.List; import java.util.function.Predicate; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestUpdateDesiredNodesAction extends BaseRestHandler { private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestUpdateDesiredNodesAction.class); @@ -66,7 +68,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } } - updateDesiredNodesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateDesiredNodesRequest.masterNodeTimeout())); + updateDesiredNodesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, updateDesiredNodesRequest, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 1eda532439e19..70df369ef9bff 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; 
@ServerlessScope(Scope.INTERNAL) public class RestVerifyRepositoryAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String name = request.param("repository"); VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(name); - verifyRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", verifyRepositoryRequest.masterNodeTimeout())); + verifyRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); verifyRepositoryRequest.ackTimeout(request.paramAsTime("timeout", verifyRepositoryRequest.ackTimeout())); return channel -> client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 6fbfaa11b83e0..7b97d88f3f85b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -20,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteDanglingIndexAction extends BaseRestHandler { @@ -41,7 +42,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); deleteRequest.ackTimeout(request.paramAsTime("timeout", deleteRequest.ackTimeout())); - deleteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRequest.masterNodeTimeout())); + deleteRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> 
client.execute( TransportDeleteDanglingIndexAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index b20eac028ba02..00af47fea8dc9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -20,6 +20,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.ACCEPTED; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestImportDanglingIndexAction extends BaseRestHandler { @Override @@ -40,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient ); importRequest.ackTimeout(request.paramAsTime("timeout", importRequest.ackTimeout())); - importRequest.masterNodeTimeout(request.paramAsTime("master_timeout", importRequest.masterNodeTimeout())); + importRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( TransportImportDanglingIndexAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java index 9498541d5a305..4031de3720333 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestAddIndexBlockAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.rest.Scope.PUBLIC; @ServerlessScope(PUBLIC) @@ -43,7 +44,7 @@ 
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndexMetadata.APIBlock.fromName(request.param("block")), Strings.splitStringByCommaToArray(request.param("index")) ); - addIndexBlockRequest.masterNodeTimeout(request.paramAsTime("master_timeout", addIndexBlockRequest.masterNodeTimeout())); + addIndexBlockRequest.masterNodeTimeout(getMasterNodeTimeout(request)); addIndexBlockRequest.ackTimeout(request.paramAsTime("timeout", addIndexBlockRequest.ackTimeout())); addIndexBlockRequest.indicesOptions(IndicesOptions.fromRequest(request, addIndexBlockRequest.indicesOptions())); return channel -> client.admin().indices().addBlock(addIndexBlockRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java index ea906955785b4..f79aefde8e14a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCloseIndexAction.java @@ -27,6 +27,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestCloseIndexAction extends BaseRestHandler { @@ -47,7 +48,7 @@ public String getName() { @UpdateForV9 public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); - closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout())); + closeIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); closeIndexRequest.ackTimeout(request.paramAsTime("timeout", closeIndexRequest.ackTimeout())); 
closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java index 37106059b7b9e..5f0e3391b762a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestCreateIndexAction.java @@ -31,6 +31,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestCreateIndexAction extends BaseRestHandler { @@ -77,7 +78,7 @@ static CreateIndexRequest prepareRequestV7(RestRequest request) { } createIndexRequest.ackTimeout(request.paramAsTime("timeout", createIndexRequest.ackTimeout())); - createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); + createIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return createIndexRequest; } @@ -116,7 +117,7 @@ static CreateIndexRequest prepareRequest(RestRequest request) { } createIndexRequest.ackTimeout(request.paramAsTime("timeout", createIndexRequest.ackTimeout())); - createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout())); + createIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return createIndexRequest; diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java index 733e35ba7f927..14375f2e0a483 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComponentTemplateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteComponentTemplateAction extends BaseRestHandler { @@ -39,7 +40,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String[] names = Strings.splitStringByCommaToArray(request.param("name")); TransportDeleteComponentTemplateAction.Request deleteReq = new TransportDeleteComponentTemplateAction.Request(names); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(TransportDeleteComponentTemplateAction.TYPE, deleteReq, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java index 8c84fb054718e..2ee1fc0a18dee 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteComposableIndexTemplateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static 
org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteComposableIndexTemplateAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String[] names = Strings.splitStringByCommaToArray(request.param("name")); TransportDeleteComposableIndexTemplateAction.Request deleteReq = new TransportDeleteComposableIndexTemplateAction.Request(names); - deleteReq.masterNodeTimeout(request.paramAsTime("master_timeout", deleteReq.masterNodeTimeout())); + deleteReq.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( TransportDeleteComposableIndexTemplateAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java index b39cd6ca0ded1..1e0b2c8441fcd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeleteIndexAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); deleteIndexRequest.ackTimeout(request.paramAsTime("timeout", deleteIndexRequest.ackTimeout())); - deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout())); + 
deleteIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions())); return channel -> client.admin().indices().delete(deleteIndexRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java index 4ba54206f40fc..3c2ff2777f504 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestDeleteIndexTemplateAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteIndexTemplateAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { DeleteIndexTemplateRequest deleteIndexTemplateRequest = new DeleteIndexTemplateRequest(request.param("name")); - deleteIndexTemplateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexTemplateRequest.masterNodeTimeout())); + deleteIndexTemplateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().indices().deleteTemplate(deleteIndexTemplateRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java index 84e7865d9f699..867466ffb4052 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComponentTemplateAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetComponentTemplateAction extends BaseRestHandler { @@ -49,7 +50,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetComponentTemplateAction.Request getRequest = new GetComponentTemplateAction.Request(request.param("name")); getRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java index 0981d5820131e..d2349cb9126a1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetComposableIndexTemplateAction.java @@ -25,6 +25,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetComposableIndexTemplateAction extends BaseRestHandler { @@ -48,7 +49,7 @@ public RestChannelConsumer prepareRequest(final RestRequest 
request, final NodeC final GetComposableIndexTemplateAction.Request getRequest = new GetComposableIndexTemplateAction.Request(request.param("name")); getRequest.local(request.paramAsBoolean("local", getRequest.local())); - getRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRequest.masterNodeTimeout())); + getRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); final boolean implicitAll = getRequest.name() == null; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java index 2efcfe3bef119..76252c8936c82 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java @@ -27,6 +27,7 @@ import static org.elasticsearch.rest.RestRequest.Method.HEAD; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * The REST handler for get template and head template APIs. 
@@ -59,7 +60,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); - getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); + getIndexTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final boolean implicitAll = getIndexTemplatesRequest.names().length == 0; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index db10bdd985d59..0ad3eff5cb6d1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -29,6 +29,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * The REST handler for get index and head index APIs. 
@@ -65,7 +66,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getIndexRequest.indices(indices); getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); + getIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); getIndexRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); getIndexRequest.features(GetIndexRequest.Feature.fromRequest(request)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 065399076c12a..66c7c357e790e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -28,6 +28,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetMappingAction extends BaseRestHandler { @@ -84,7 +85,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final GetMappingsRequest getMappingsRequest = new GetMappingsRequest(); getMappingsRequest.indices(indices); getMappingsRequest.indicesOptions(IndicesOptions.fromRequest(request, getMappingsRequest.indicesOptions())); - final TimeValue timeout = request.paramAsTime("master_timeout", getMappingsRequest.masterNodeTimeout()); + final TimeValue timeout = getMasterNodeTimeout(request); getMappingsRequest.masterNodeTimeout(timeout); 
getMappingsRequest.local(request.paramAsBoolean("local", getMappingsRequest.local())); final HttpChannel httpChannel = request.getHttpChannel(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index af72e66f6127d..96384d7e86a97 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetSettingsAction extends BaseRestHandler { @@ -55,7 +56,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .includeDefaults(renderDefaults) .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); - getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); + getSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java index 67de902d50e91..348ec87ed0747 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexDeleteAliasesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndexDeleteAliasesAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); indicesAliasesRequest.addAliasAction(AliasActions.remove().indices(indices).aliases(aliases)); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().indices().aliases(indicesAliasesRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java index 7395f00d733b1..93eac4c448522 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndexPutAliasAction.java @@ -24,6 +24,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndexPutAliasAction extends BaseRestHandler { @@ -99,7 +100,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + 
indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); IndicesAliasesRequest.AliasActions aliasAction = AliasActions.add().indices(indices).alias(alias); if (routing != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java index e603d7647966f..b6a407942f629 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesAliasesAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndicesAliasesAction extends BaseRestHandler { @@ -38,7 +39,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { IndicesAliasesRequest indicesAliasesRequest = new IndicesAliasesRequest(); - indicesAliasesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", indicesAliasesRequest.masterNodeTimeout())); + indicesAliasesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); indicesAliasesRequest.ackTimeout(request.paramAsTime("timeout", indicesAliasesRequest.ackTimeout())); try (XContentParser parser = request.contentParser()) { IndicesAliasesRequest.PARSER.parse(parser, indicesAliasesRequest, null); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java index 4e00bbb5966b8..3c95ff8a17d7d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestOpenIndexAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestOpenIndexAction extends BaseRestHandler { @@ -41,7 +42,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { OpenIndexRequest openIndexRequest = new OpenIndexRequest(Strings.splitStringByCommaToArray(request.param("index"))); openIndexRequest.ackTimeout(request.paramAsTime("timeout", openIndexRequest.ackTimeout())); - openIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", openIndexRequest.masterNodeTimeout())); + openIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); openIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, openIndexRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java index fd6f529d876a2..4762e6d09f100 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java @@ -22,6 +22,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutComponentTemplateAction extends BaseRestHandler { @@ -40,7 +41,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final 
RestRequest request, final NodeClient client) throws IOException { PutComponentTemplateAction.Request putRequest = new PutComponentTemplateAction.Request(request.param("name")); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); try (var parser = request.contentParser()) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java index 3171c18bc9e28..4b94691f83b7d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java @@ -22,6 +22,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutComposableIndexTemplateAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC TransportPutComposableIndexTemplateAction.Request putRequest = new TransportPutComposableIndexTemplateAction.Request( request.param("name") ); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); try (var parser = request.contentParser()) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index 74ca5e9d3921b..44df58b95292d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -26,6 +26,7 @@ import static java.util.Arrays.asList; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutIndexTemplateAction extends BaseRestHandler { @@ -62,7 +63,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.patterns(asList(request.paramAsStringArray("index_patterns", Strings.EMPTY_ARRAY))); } putRequest.order(request.paramAsInt("order", putRequest.order())); - putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); + putRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java index 5d4d913767fe4..6ee90db500eaf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutMappingAction.java @@ -29,6 +29,7 @@ import static org.elasticsearch.index.mapper.MapperService.isMappingSourceTyped; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutMappingAction extends 
BaseRestHandler { @@ -91,7 +92,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } putMappingRequest.ackTimeout(request.paramAsTime("timeout", putMappingRequest.ackTimeout())); - putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout())); + putMappingRequest.masterNodeTimeout(getMasterNodeTimeout(request)); putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions())); putMappingRequest.writeIndexOnly(request.paramAsBoolean("write_index_only", false)); return channel -> client.admin().indices().putMapping(putMappingRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index c72508a9bf646..d6c1ff4b71108 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -24,6 +24,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public abstract class RestResizeHandler extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestResizeHandler.class); @@ -50,7 +51,7 @@ public final RestChannelConsumer prepareRequest(final RestRequest request, final resizeRequest.setResizeType(getResizeType()); request.applyContentParser(resizeRequest::fromXContent); resizeRequest.ackTimeout(request.paramAsTime("timeout", resizeRequest.ackTimeout())); - resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); + resizeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); 
resizeRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); return channel -> client.admin().indices().resizeIndex(resizeRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java index 59c37c2c015a6..4d39e44018055 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRolloverIndexAction.java @@ -26,6 +26,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestRolloverIndexAction extends BaseRestHandler { @@ -52,7 +53,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC rolloverIndexRequest.dryRun(request.paramAsBoolean("dry_run", false)); rolloverIndexRequest.lazy(request.paramAsBoolean("lazy", false)); rolloverIndexRequest.ackTimeout(request.paramAsTime("timeout", rolloverIndexRequest.ackTimeout())); - rolloverIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", rolloverIndexRequest.masterNodeTimeout())); + rolloverIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (DataStream.isFailureStoreFeatureFlagEnabled()) { boolean failureStore = request.paramAsBoolean("target_failure_store", false); if (failureStore) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java index 7eab7168cd100..c8b30765ab2c7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestSimulateIndexTemplateAction extends BaseRestHandler { @@ -40,9 +41,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { SimulateIndexTemplateRequest simulateIndexTemplateRequest = new SimulateIndexTemplateRequest(request.param("name")); - simulateIndexTemplateRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", simulateIndexTemplateRequest.masterNodeTimeout()) - ); + simulateIndexTemplateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); simulateIndexTemplateRequest.includeDefaults(request.paramAsBoolean("include_defaults", false)); if (request.hasContent()) { TransportPutComposableIndexTemplateAction.Request indexTemplateRequest = new TransportPutComposableIndexTemplateAction.Request( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java index bc38d549926af..53ab068c86695 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestSimulateTemplateAction extends BaseRestHandler { @@ -52,7 +53,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli 
simulateRequest.indexTemplateRequest(indexTemplateRequest); } - simulateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", simulateRequest.masterNodeTimeout())); + simulateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(SimulateTemplateAction.INSTANCE, simulateRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 222a22e5da3e3..af63bd23ef843 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -24,6 +24,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestUpdateSettingsAction extends BaseRestHandler { @@ -44,7 +45,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); updateSettingsRequest.ackTimeout(request.paramAsTime("timeout", updateSettingsRequest.ackTimeout())); updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); - updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); + updateSettingsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); try (var parser = request.contentParser()) { diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 570fb0ebc7c77..806e3939b6d1e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -34,6 +34,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestAllocationAction extends AbstractCatAction { @@ -61,7 +62,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().routingTable(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java index 4a238451bcc69..4faf44ff8c5a7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java @@ -34,6 +34,7 @@ import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * cat API class for handling get componentTemplate. 
@@ -76,7 +77,7 @@ protected BaseRestHandler.RestChannelConsumer doCatRequest(RestRequest request, final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().metadata(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(ClusterStateResponse clusterStateResponse) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index ccfcb9b505e92..ca3bcfbcd38e0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -44,9 +44,9 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; -import static org.elasticsearch.action.support.master.MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestIndicesAction extends AbstractCatAction { @@ -79,7 +79,7 @@ protected void documentation(StringBuilder sb) { public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.strictExpand()); - final TimeValue masterNodeTimeout = request.paramAsTime("master_timeout", 
DEFAULT_MASTER_NODE_TIMEOUT); + final TimeValue masterNodeTimeout = getMasterNodeTimeout(request); final boolean includeUnloadedSegments = request.paramAsBoolean("include_unloaded_segments", false); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java index 109fd026502c9..b0805bf423453 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestMasterAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestMasterAction extends AbstractCatAction { @@ -47,7 +48,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestResponseListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java index 9a032ce064cf6..83e6ea35ec520 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodeAttrsAction.java @@ -31,6 +31,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestNodeAttrsAction extends AbstractCatAction { @@ -55,7 +56,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index e5e0f9ee926f3..2c1f57f291969 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -60,6 +60,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestNodesAction extends AbstractCatAction { @@ -86,7 +87,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); nodesInfoRequest.clear() diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java index 19ebbd2f19df4..5ed0cd722d5db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPendingClusterTasksAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPendingClusterTasksAction extends AbstractCatAction { @@ -45,7 +46,7 @@ protected void documentation(StringBuilder sb) { @Override public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { PendingClusterTasksRequest pendingClusterTasksRequest = new PendingClusterTasksRequest(); - pendingClusterTasksRequest.masterNodeTimeout(request.paramAsTime("master_timeout", pendingClusterTasksRequest.masterNodeTimeout())); + pendingClusterTasksRequest.masterNodeTimeout(getMasterNodeTimeout(request)); pendingClusterTasksRequest.local(request.paramAsBoolean("local", pendingClusterTasksRequest.local())); return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java index 7aba2c8e38a6d..0e459b53d203c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestPluginsAction.java @@ -31,6 +31,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPluginsAction extends AbstractCatAction { @@ 
-56,7 +57,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index 4e32a3635872f..5744923b86d6c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cat API class to display information about snapshot repositories @@ -38,7 +39,7 @@ public List routes() { protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); - getRepositoriesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getRepositoriesRequest.masterNodeTimeout())); + getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java index 
dda03d515e828..1f11a662c0abf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSegmentsAction.java @@ -33,6 +33,7 @@ import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestSegmentsAction extends AbstractCatAction { @@ -58,7 +59,7 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, final Node final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices); final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 8f26814def98f..664f9b63dee2a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -53,6 +53,7 @@ import java.util.function.Function; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestShardsAction extends AbstractCatAction { @@ -83,7 +84,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final var clusterStateRequest = new 
ClusterStateRequest(); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices).indicesOptions(IndicesOptions.strictExpandHidden()); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 9b4c6534a452f..0ff44e37698d9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Cat API class to display information about snapshots @@ -56,7 +57,7 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); - getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); + getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<>(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java index 929de981ce146..849e2d68cb2dc 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTemplatesAction.java @@ -29,6 +29,7 @@ import java.util.Map; import static 
org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestTemplatesAction extends AbstractCatAction { @@ -56,15 +57,13 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient ? new GetIndexTemplatesRequest() : new GetIndexTemplatesRequest(matchPattern); getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); - getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); + getIndexTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); final GetComposableIndexTemplateAction.Request getComposableTemplatesRequest = new GetComposableIndexTemplateAction.Request( matchPattern ); getComposableTemplatesRequest.local(request.paramAsBoolean("local", getComposableTemplatesRequest.local())); - getComposableTemplatesRequest.masterNodeTimeout( - request.paramAsTime("master_timeout", getComposableTemplatesRequest.masterNodeTimeout()) - ); + getComposableTemplatesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 9ca0dae8c8740..260ce4a3aeb3d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -45,6 +45,7 @@ import static org.elasticsearch.common.util.set.Sets.addToCopy; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestThreadPoolAction extends AbstractCatAction { @@ -72,7 +73,7 @@ public RestChannelConsumer doCatRequest(final RestRequest request, final NodeCli 
final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); clusterStateRequest.clear().nodes(true); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); - clusterStateRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterStateRequest.masterNodeTimeout())); + clusterStateRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener(channel) { @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java index 3049b9096004e..bf78612ccf5a9 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestDeletePipelineAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestDeletePipelineAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { DeletePipelineRequest request = new DeletePipelineRequest(restRequest.param("id")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); return channel -> client.admin().cluster().deletePipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java index 
e87a78c6b658e..d6712b44f3e03 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestGetPipelineAction.java @@ -22,6 +22,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetPipelineAction extends BaseRestHandler { @@ -42,7 +43,7 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl restRequest.paramAsBoolean("summary", false), Strings.splitStringByCommaToArray(restRequest.param("id")) ); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.admin().cluster().getPipeline(request, new RestToXContentListener<>(channel, GetPipelineResponse::status)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java index 30b3448a04883..907479bddff16 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/ingest/RestPutPipelineAction.java @@ -24,6 +24,7 @@ import java.util.Locale; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestPutPipelineAction extends BaseRestHandler { @@ -54,7 +55,7 @@ public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient cl Tuple sourceTuple = restRequest.contentOrSourceParam(); PutPipelineRequest request = new PutPipelineRequest(restRequest.param("id"), sourceTuple.v2(), sourceTuple.v1(), ifVersion); - 
request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); return channel -> client.admin().cluster().putPipeline(request, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index e9e2122c237c6..6098ea777d38a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -39,6 +39,7 @@ import java.util.function.Supplier; import static org.elasticsearch.core.TimeValue.timeValueMillis; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; /** * Test for serialization and parsing of {@link ClusterRerouteRequest} and its commands. See the superclass for, well, everything. 
@@ -202,7 +203,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep params.put("retry_failed", Boolean.toString(original.isRetryFailed())); } if (false == original.masterNodeTimeout().equals(MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { - params.put("master_timeout", original.masterNodeTimeout().toString()); + params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().toString()); } if (original.getCommands() != null) { hasBody = true; diff --git a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java index ca516a00af239..e898b852c6c39 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestUtilsTests.java @@ -9,7 +9,9 @@ package org.elasticsearch.rest; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; import java.util.HashMap; import java.util.Locale; @@ -185,4 +187,19 @@ private void assertCorsSettingRegexMatches(String settingsValue, boolean expectM ); } } + + public void testGetMasterNodeTimeout() { + assertEquals( + TimeValue.timeValueSeconds(30), + RestUtils.getMasterNodeTimeout(new FakeRestRequest.Builder(xContentRegistry()).build()) + ); + + final var timeout = randomTimeValue(); + assertEquals( + timeout, + RestUtils.getMasterNodeTimeout( + new FakeRestRequest.Builder(xContentRegistry()).withParams(Map.of("master_timeout", timeout.getStringRep())).build() + ) + ); + } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java index e9de0f76df9cf..4517f9cd353c6 100644 --- 
a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestAddVotingConfigExclusionActionTests.java @@ -18,6 +18,7 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction.resolveVotingConfigExclusionsRequest; public class RestAddVotingConfigExclusionActionTests extends ESTestCase { @@ -69,7 +70,7 @@ public void testResolveVotingConfigExclusionsRequestTimeout() { public void testResolveVotingConfigExclusionsRequestMasterTimeout() { Map params = new HashMap<>(); params.put("node_names", "node-1,node-2,node-3"); - params.put("master_timeout", "60s"); + params.put(REST_MASTER_TIMEOUT_PARAM, "60s"); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/_cluster/voting_config_exclusions") .withParams(params) @@ -84,7 +85,7 @@ public void testResolveVotingConfigExclusionsRequestTimeoutAndMasterTimeout() { Map params = new HashMap<>(); params.put("node_names", "node-1,node-2,node-3"); params.put("timeout", "60s"); - params.put("master_timeout", "120s"); + params.put(REST_MASTER_TIMEOUT_PARAM, "120s"); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/_cluster/voting_config_exclusions") .withParams(params) diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java index d77d660e8d2d6..28f1c3d999aef 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java +++ 
b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClearVotingConfigExclusionActionTests.java @@ -15,6 +15,7 @@ import java.util.Map; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction.resolveVotingConfigExclusionsRequest; public class RestClearVotingConfigExclusionActionTests extends ESTestCase { @@ -35,7 +36,7 @@ public void testResolveRequestParameters() { final var request = resolveVotingConfigExclusionsRequest( new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.DELETE) .withPath("/_cluster/voting_config_exclusions") - .withParams(Map.of("master_timeout", "60s", "wait_for_removal", "false")) + .withParams(Map.of(REST_MASTER_TIMEOUT_PARAM, "60s", "wait_for_removal", "false")) .build() ); assertEquals(TimeValue.timeValueMinutes(1), request.masterNodeTimeout()); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java index 15f84c5e455b8..c3c1213f320bc 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthActionTests.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.object.HasToString.hasToString; @@ -45,7 +46,7 @@ public void testFromRequest() { params.put("index", index); params.put("local", String.valueOf(local)); - params.put("master_timeout", masterTimeout.getStringRep()); + params.put(REST_MASTER_TIMEOUT_PARAM, masterTimeout.getStringRep()); params.put("timeout", 
timeout.getStringRep()); params.put("wait_for_status", waitForStatus.name()); if (waitForNoRelocatingShards || randomBoolean()) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 139c6e04c32d4..7c8ebc5a66e80 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestCcrStatsAction extends BaseRestHandler { @@ -41,7 +42,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina if (restRequest.hasParam("timeout")) { request.setTimeout(restRequest.paramAsTime("timeout", null)); } - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute( CcrStatsAction.INSTANCE, request, diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java index 9e94e27f55811..1f96ea6be9dc5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestDeleteAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction.INSTANCE; public class 
RestDeleteAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java index 86be9487417f8..e20c34fe38243 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestFollowInfoAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { final FollowInfoAction.Request request = new FollowInfoAction.Request(); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.setFollowerIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); return channel -> client.execute(FollowInfoAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java index 519888a93afdf..84a8d4f879e02 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestGetAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction.INSTANCE; public class RestGetAutoFollowPatternAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(); request.setName(restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java index 6846d96a2f015..5a2ba2fe736f7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ActivateAutoFollowPatternAction.INSTANCE; public class RestPauseAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 
@@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { Request request = new Request(restRequest.param("name"), false); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java index b08fbb039cbc3..8c0f79f0b2440 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPauseFollowAction.java @@ -14,6 +14,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PauseFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.PauseFollowAction.Request; @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { Request request = new Request(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java index dd432411014ab..cb42431022501 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutAutoFollowPatternAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction.INSTANCE; public class RestPutAutoFollowPatternAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient private static Request createRequest(RestRequest restRequest) throws IOException { try (XContentParser parser = restRequest.contentOrSourceParamParser()) { Request request = Request.fromXContent(parser, restRequest.param("name")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java index 5ceef134090a4..162431d68fb0f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.PutFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.PutFollowAction.Request; @@ -43,7 +44,7 @@ private static Request createRequest(RestRequest restRequest) throws IOException final Request request = Request.fromXContent(parser); 
request.waitForActiveShards(ActiveShardCount.parseString(restRequest.param("wait_for_active_shards"))); request.setFollowerIndex(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java index a6ccf9fe1fa0b..3e51386ef1069 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeAutoFollowPatternAction.java @@ -15,6 +15,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ActivateAutoFollowPatternAction.INSTANCE; public class RestResumeAutoFollowPatternAction extends BaseRestHandler { @@ -32,7 +33,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { Request request = new Request(restRequest.param("name"), true); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java index d3994c487e456..86a00ca1ff020 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestResumeFollowAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.INSTANCE; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request; @@ -47,7 +48,7 @@ static Request createRequest(RestRequest restRequest) throws IOException { request = new Request(); request.setFollowerIndex(restRequest.param("index")); } - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return request; } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java index 5936fc05cb449..acc6ffb0a67bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ccr.action.UnfollowAction.INSTANCE; public class RestUnfollowAction extends BaseRestHandler { @@ -33,7 +34,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { UnfollowAction.Request request = new UnfollowAction.Request(restRequest.param("index")); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(INSTANCE, request, new 
RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java index 044866ad07cb5..abb03e6e3037e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestDeleteLicenseAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteLicenseAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { AcknowledgedRequest.Plain deleteLicenseRequest = new AcknowledgedRequest.Plain(); deleteLicenseRequest.ackTimeout(request.paramAsTime("timeout", deleteLicenseRequest.ackTimeout())); - deleteLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteLicenseRequest.masterNodeTimeout())); + deleteLicenseRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.admin() .cluster() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java index b3e436d83165d..0d60be455ff29 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartBasicLicense.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPostStartBasicLicense extends BaseRestHandler { @@ -34,7 +35,7 @@ protected RestChannelConsumer 
prepareRequest(RestRequest request, NodeClient cli PostStartBasicRequest startBasicRequest = new PostStartBasicRequest(); startBasicRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); startBasicRequest.ackTimeout(request.paramAsTime("timeout", startBasicRequest.ackTimeout())); - startBasicRequest.masterNodeTimeout(request.paramAsTime("master_timeout", startBasicRequest.masterNodeTimeout())); + startBasicRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute( PostStartBasicAction.INSTANCE, startBasicRequest, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java index 413c0d5ba0732..8f954d61548e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPutLicenseAction.java @@ -18,6 +18,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutLicenseAction extends BaseRestHandler { @@ -46,7 +47,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putLicenseRequest.license(request.content(), request.getXContentType()); putLicenseRequest.acknowledge(request.paramAsBoolean("acknowledge", false)); putLicenseRequest.ackTimeout(request.paramAsTime("timeout", putLicenseRequest.ackTimeout())); - putLicenseRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putLicenseRequest.masterNodeTimeout())); + putLicenseRequest.masterNodeTimeout(getMasterNodeTimeout(request)); if (License.LicenseType.isBasic(putLicenseRequest.license().type())) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java index 4bd3140753f5c..59a2cf3c936db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestXPackUsageAction.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.core.rest.action; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.http.HttpChannel; @@ -27,6 +26,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestXPackUsageAction extends BaseRestHandler { @@ -43,7 +43,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final TimeValue masterTimeout = request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT); + final TimeValue masterTimeout = getMasterNodeTimeout(request); final HttpChannel httpChannel = request.getHttpChannel(); return channel -> new XPackUsageRequestBuilder(new RestCancellableNodeClient(client, httpChannel)).setMasterNodeTimeout( masterTimeout diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java index 29c008d4c3128..4c63ef72adcb5 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/rest/action/RestFreezeIndexAction.java @@ -26,6 +26,7 
@@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.GONE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public final class RestFreezeIndexAction extends BaseRestHandler { @@ -49,7 +50,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli // translate to a get indices request, so that we'll 404 on non-existent indices final GetIndexRequest getIndexRequest = new GetIndexRequest(); getIndexRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); - getIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexRequest.masterNodeTimeout())); + getIndexRequest.masterNodeTimeout(getMasterNodeTimeout(request)); getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<>(channel) { @Override @@ -63,7 +64,7 @@ public RestResponse buildResponse(GetIndexResponse getIndexResponse, XContentBui FreezeRequest freezeRequest = new FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); freezeRequest.ackTimeout(request.paramAsTime("timeout", freezeRequest.ackTimeout())); - freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); + freezeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java index 9cd598f85c00f..b6324ba671162 100644 --- 
a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestDeleteLifecycleAction.java @@ -16,6 +16,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestDeleteLifecycleAction extends BaseRestHandler { @@ -34,7 +35,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String lifecycleName = restRequest.param("name"); DeleteLifecycleAction.Request deleteLifecycleRequest = new DeleteLifecycleAction.Request(lifecycleName); deleteLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", deleteLifecycleRequest.ackTimeout())); - deleteLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteLifecycleRequest.masterNodeTimeout())); + deleteLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteLifecycleAction.INSTANCE, deleteLifecycleRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java index beae3f4d18194..195f989eab055 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestExplainLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestExplainLifecycleAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient 
explainLifecycleRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); explainLifecycleRequest.onlyManaged(restRequest.paramAsBoolean("only_managed", false)); explainLifecycleRequest.onlyErrors(restRequest.paramAsBoolean("only_errors", false)); - explainLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", explainLifecycleRequest.masterNodeTimeout())); + explainLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ExplainLifecycleAction.INSTANCE, explainLifecycleRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java index 9631558649ed0..321d7b722c1c9 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestGetLifecycleAction extends BaseRestHandler { @@ -36,7 +37,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String[] lifecycleNames = Strings.splitStringByCommaToArray(restRequest.param("name")); GetLifecycleAction.Request getLifecycleRequest = new GetLifecycleAction.Request(lifecycleNames); getLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", getLifecycleRequest.ackTimeout())); - getLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", getLifecycleRequest.masterNodeTimeout())); + getLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> new RestCancellableNodeClient(client, 
restRequest.getHttpChannel()).execute( GetLifecycleAction.INSTANCE, diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java index 1721510fe7f87..91a201045ba61 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetStatusAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestGetStatusAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { AcknowledgedRequest.Plain request = new AcknowledgedRequest.Plain(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java index 6d9300f2c6088..f5834f9ae4e46 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestMoveToStepAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected 
RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request = TransportMoveToStepAction.Request.parseRequest(index, parser); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.MOVE_TO_STEP, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java index 6240ba1a97574..8bd14b083a22d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestPutLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestPutLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient try (XContentParser parser = restRequest.contentParser()) { PutLifecycleRequest putLifecycleRequest = PutLifecycleRequest.parseRequest(lifecycleName, parser); putLifecycleRequest.ackTimeout(restRequest.paramAsTime("timeout", putLifecycleRequest.ackTimeout())); - putLifecycleRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putLifecycleRequest.masterNodeTimeout())); + putLifecycleRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.PUT, putLifecycleRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java index 47e2cd0dc69a1..a011aa3d38b64 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRemoveIndexLifecyclePolicyAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestRemoveIndexLifecyclePolicyAction extends BaseRestHandler { @@ -35,7 +36,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { String[] indexes = Strings.splitStringByCommaToArray(restRequest.param("index")); RemoveIndexLifecyclePolicyAction.Request changePolicyRequest = new RemoveIndexLifecyclePolicyAction.Request(indexes); - changePolicyRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", changePolicyRequest.masterNodeTimeout())); + changePolicyRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); changePolicyRequest.indicesOptions(IndicesOptions.fromRequest(restRequest, changePolicyRequest.indicesOptions())); return channel -> client.execute( diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java index 87c6fd908ad0e..324266b420f25 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestRetryAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestRetryAction extends BaseRestHandler { @@ -37,7 +38,7 @@ protected 
RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); TransportRetryAction.Request request = new TransportRetryAction.Request(indices); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.indices(indices); request.indicesOptions(IndicesOptions.fromRequest(restRequest, IndicesOptions.strictExpandOpen())); return channel -> client.execute(ILMActions.RETRY, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java index 7be09625cc799..bbc359de090d7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStartILMAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestStartILMAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StartILMRequest request = new StartILMRequest(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.START, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java index cb71a7ecb10b9..93704e2ab824f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestStopAction.java @@ -17,6 +17,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestStopAction extends BaseRestHandler { @@ -34,7 +35,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StopILMRequest request = new StopILMRequest(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(ILMActions.STOP, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java index d58e06a35dcaa..56b0436df2c90 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestMlMemoryAction.java @@ -21,13 +21,14 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; @ServerlessScope(Scope.INTERNAL) public class RestMlMemoryAction extends BaseRestHandler { public static final String NODE_ID = "nodeId"; - public static final String MASTER_TIMEOUT = "master_timeout"; + public static final String MASTER_TIMEOUT = 
REST_MASTER_TIMEOUT_PARAM; public static final String TIMEOUT = "timeout"; @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java index f60b8581e3769..6641aaf66a103 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -43,7 +44,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { SetUpgradeModeAction.Request request = new SetUpgradeModeAction.Request(restRequest.paramAsBoolean("enabled", false)); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(SetUpgradeModeAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java index d9ea3b006b7e0..d78a9edb50753 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestDeleteDatafeedAction.java @@ -21,6 +21,7 @@ import java.util.List; 
import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -49,7 +50,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request.setForce(restRequest.paramAsBoolean(CloseJobAction.Request.FORCE.getPreferredName(), request.isForce())); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteDatafeedAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java index c216afef89fdb..903deb27d2dd9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -52,7 +53,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); } putDatafeedRequest.ackTimeout(restRequest.paramAsTime("timeout", putDatafeedRequest.ackTimeout())); - 
putDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putDatafeedRequest.masterNodeTimeout())); + putDatafeedRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(PutDatafeedAction.INSTANCE, putDatafeedRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java index f51f37715cdc4..c11f4ad367812 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -58,7 +59,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); } updateDatafeedRequest.ackTimeout(restRequest.paramAsTime("timeout", updateDatafeedRequest.ackTimeout())); - updateDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateDatafeedRequest.masterNodeTimeout())); + updateDatafeedRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateDatafeedAction.INSTANCE, updateDatafeedRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java index 
d26d0091e1acf..2c3f84401b912 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/filter/RestDeleteFilterAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -44,7 +45,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { Request request = new Request(restRequest.param(Request.FILTER_ID.getPreferredName())); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(DeleteFilterAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java index db21a4278df24..29c63f6f60fcc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestUpdateTrainedModelDeploymentAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestUpdateTrainedModelDeploymentAction extends BaseRestHandler { @@ -51,7 +52,7 @@ 
protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient XContentParser parser = restRequest.contentParser(); UpdateTrainedModelDeploymentAction.Request request = UpdateTrainedModelDeploymentAction.Request.parseRequest(modelId, parser); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateTrainedModelDeploymentAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java index 81cc9ab036bb9..659fb6ba2e271 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestDeleteJobAction.java @@ -26,6 +26,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -51,7 +52,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient DeleteJobAction.Request deleteJobRequest = new DeleteJobAction.Request(restRequest.param(Job.ID.getPreferredName())); deleteJobRequest.setForce(restRequest.paramAsBoolean(CloseJobAction.Request.FORCE.getPreferredName(), deleteJobRequest.isForce())); deleteJobRequest.ackTimeout(restRequest.paramAsTime("timeout", deleteJobRequest.ackTimeout())); - deleteJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", deleteJobRequest.masterNodeTimeout())); + 
deleteJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); deleteJobRequest.setDeleteUserAnnotations(restRequest.paramAsBoolean("delete_user_annotations", false)); if (restRequest.paramAsBoolean("wait_for_completion", true)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java index 6add232cfecb8..a3cb1016756e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPostJobUpdateAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -47,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient XContentParser parser = restRequest.contentParser(); UpdateJobAction.Request updateJobRequest = UpdateJobAction.Request.parseRequest(jobId, parser); updateJobRequest.ackTimeout(restRequest.paramAsTime("timeout", updateJobRequest.ackTimeout())); - updateJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateJobRequest.masterNodeTimeout())); + updateJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(UpdateJobAction.INSTANCE, updateJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java index 7bff218114b71..b8ce60519189f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestPutJobAction.java @@ -23,6 +23,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -50,7 +51,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient IndicesOptions indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); PutJobAction.Request putJobRequest = PutJobAction.Request.parseRequest(jobId, parser, indicesOptions); putJobRequest.ackTimeout(restRequest.paramAsTime("timeout", putJobRequest.ackTimeout())); - putJobRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putJobRequest.masterNodeTimeout())); + putJobRequest.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(PutJobAction.INSTANCE, putJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java index 28271ec578fb0..39fe102ee08be 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/job/RestResetJobAction.java @@ -25,6 +25,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; @ServerlessScope(Scope.PUBLIC) @@ -44,7 +45,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { ResetJobAction.Request request = 
new ResetJobAction.Request(restRequest.param(Job.ID.getPreferredName())); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.setDeleteUserAnnotations(restRequest.paramAsBoolean("delete_user_annotations", false)); if (restRequest.paramAsBoolean("wait_for_completion", true)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java index 4478552a22a9e..356e7cdd49635 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/modelsnapshots/RestRevertModelSnapshotAction.java @@ -21,6 +21,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; import static org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction.Request.SNAPSHOT_ID; import static org.elasticsearch.xpack.ml.MachineLearning.BASE_PATH; import static org.elasticsearch.xpack.ml.MachineLearning.PRE_V7_BASE_PATH; @@ -66,7 +67,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ); } request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(RevertModelSnapshotAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git 
a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java index 9bfea90a28489..f57ed36540f39 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/http/PublishableHttpResource.java @@ -32,6 +32,7 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; /** * {@code PublishableHttpResource} represents an {@link HttpResource} that is a single file or object that can be checked and @@ -112,7 +113,7 @@ protected PublishableHttpResource( final Map parameters = Maps.newMapWithExpectedSize(baseParameters.size() + 1); parameters.putAll(baseParameters); - parameters.put("master_timeout", masterTimeout.toString()); + parameters.put(REST_MASTER_TIMEOUT_PARAM, masterTimeout.toString()); this.defaultParameters = Collections.unmodifiableMap(parameters); } else { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java index b72891708e780..cbb225462b858 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/AbstractPublishableHttpResourceTestCase.java @@ -33,6 +33,7 @@ import static java.util.stream.Collectors.joining; import static 
org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockBooleanActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.mockPublishResultActionListener; import static org.elasticsearch.xpack.monitoring.exporter.http.AsyncHttpResourceHelper.whenPerformRequestAsyncWith; @@ -228,9 +229,9 @@ protected void assertParameters(final PublishableHttpResource resource) { final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { - assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); + assertThat(parameters.remove(REST_MASTER_TIMEOUT_PARAM), is(masterTimeout.toString())); } else { - assertFalse(parameters.containsKey("master_timeout")); + assertFalse(parameters.containsKey(REST_MASTER_TIMEOUT_PARAM)); } assertThat(parameters.remove("filter_path"), is("$NONE")); @@ -241,9 +242,9 @@ protected void assertVersionParameters(final PublishableHttpResource resource) { final Map parameters = new HashMap<>(resource.getDefaultParameters()); if (masterTimeout != null && TimeValue.MINUS_ONE.equals(masterTimeout) == false) { - assertThat(parameters.remove("master_timeout"), is(masterTimeout.toString())); + assertThat(parameters.remove(REST_MASTER_TIMEOUT_PARAM), is(masterTimeout.toString())); } else { - assertFalse(parameters.containsKey("master_timeout")); + assertFalse(parameters.containsKey(REST_MASTER_TIMEOUT_PARAM)); } assertThat(parameters.remove("filter_path"), is("*.version")); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java index 64237886d5f7b..fec656e76cc0d 
100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterTests.java @@ -49,6 +49,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -764,7 +765,10 @@ private void assertMasterTimeoutSet(final List resources if (timeout != null) { for (final HttpResource resource : resources) { if (resource instanceof PublishableHttpResource) { - assertEquals(timeout.getStringRep(), ((PublishableHttpResource) resource).getDefaultParameters().get("master_timeout")); + assertEquals( + timeout.getStringRep(), + ((PublishableHttpResource) resource).getDefaultParameters().get(REST_MASTER_TIMEOUT_PARAM) + ); } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java index e1f8ac16f15ec..c5fcde1f7ec94 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/rest/RestGetStatusAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.PUBLIC) public class RestGetStatusAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { GetStatusAction.Request request = new GetStatusAction.Request(); 
request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); request.waitForResourcesCreated(restRequest.paramAsBoolean("wait_for_resources_created", false)); return channel -> client.execute( GetStatusAction.INSTANCE, diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java index 9dec9fb86e26c..4fcf87bd8bf1f 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.searchablesnapshots.rest; -import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -19,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestMountSearchableSnapshotAction extends BaseRestHandler { @Override @@ -36,7 +36,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli MountSearchableSnapshotRequest mountSearchableSnapshotRequest = MountSearchableSnapshotRequest.PARSER.apply( request.contentParser(), request - ).masterNodeTimeout(request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT)); + ).masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> 
client.execute( MountSearchableSnapshotAction.INSTANCE, mountSearchableSnapshotRequest, diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java index b97ea82d3d73f..44b7461fe70cd 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestDeleteShutdownNodeAction.java @@ -14,6 +14,8 @@ import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestDeleteShutdownNodeAction extends BaseRestHandler { @Override @@ -35,7 +37,7 @@ public boolean canTripCircuitBreaker() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { String nodeId = request.param("nodeId"); final var parsedRequest = new DeleteShutdownNodeAction.Request(nodeId); - parsedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", parsedRequest.masterNodeTimeout())); + parsedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(DeleteShutdownNodeAction.INSTANCE, parsedRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java index a1a987b57b233..c2efaa6e1c11b 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestPutShutdownNodeAction.java @@ -16,6 +16,8 @@ import java.io.IOException; import java.util.List; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; + public class RestPutShutdownNodeAction extends 
BaseRestHandler { @Override @@ -38,7 +40,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String nodeId = request.param("nodeId"); try (XContentParser parser = request.contentParser()) { PutShutdownNodeAction.Request parsedRequest = PutShutdownNodeAction.Request.parseRequest(nodeId, parser); - parsedRequest.masterNodeTimeout(request.paramAsTime("master_timeout", parsedRequest.masterNodeTimeout())); + parsedRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(PutShutdownNodeAction.INSTANCE, parsedRequest, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java index 5d9a692a0876c..e67d7dd2e2fe9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestDeleteSnapshotLifecycleAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.DELETE; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestDeleteSnapshotLifecycleAction extends BaseRestHandler { @@ -37,7 +38,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String lifecycleId = request.param("name"); DeleteSnapshotLifecycleAction.Request req = new DeleteSnapshotLifecycleAction.Request(lifecycleId); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(DeleteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java index 622021ca87a89..7de23c04d8e91 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotLifecycleAction.java @@ -19,6 +19,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestExecuteSnapshotLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String snapLifecycleId = request.param("name"); ExecuteSnapshotLifecycleAction.Request req = new ExecuteSnapshotLifecycleAction.Request(snapLifecycleId); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(ExecuteSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java index 2175839886022..84943e361b94b 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestExecuteSnapshotRetentionAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import 
static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestExecuteSnapshotRetentionAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { ExecuteSnapshotRetentionAction.Request req = new ExecuteSnapshotRetentionAction.Request(); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(ExecuteSnapshotRetentionAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java index 278e15a9f9b36..5e4ea3002e614 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSLMStatusAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSLMStatusAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { AcknowledgedRequest.Plain request = new AcknowledgedRequest.Plain(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(GetSLMStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); 
} } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java index c6609d85ca1c3..3818947488bfe 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSnapshotLifecycleAction extends BaseRestHandler { @@ -38,7 +39,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli String[] lifecycleNames = Strings.splitStringByCommaToArray(request.param("name")); GetSnapshotLifecycleAction.Request req = new GetSnapshotLifecycleAction.Request(lifecycleNames); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(GetSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java index 5fea0905f04bd..3a177dfa467be 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestGetSnapshotLifecycleStatsAction.java @@ -19,6 +19,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static 
org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestGetSnapshotLifecycleStatsAction extends BaseRestHandler { @@ -37,7 +38,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { AcknowledgedRequest.Plain req = new AcknowledgedRequest.Plain(); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> client.execute(GetSnapshotLifecycleStatsAction.INSTANCE, req, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java index 968f043f61bd6..8066ab2575385 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestPutSnapshotLifecycleAction.java @@ -20,6 +20,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.PUT; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestPutSnapshotLifecycleAction extends BaseRestHandler { @@ -40,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli try (XContentParser parser = request.contentParser()) { PutSnapshotLifecycleAction.Request req = PutSnapshotLifecycleAction.Request.parseRequest(snapLifecycleName, parser); req.ackTimeout(request.paramAsTime("timeout", req.ackTimeout())); - req.masterNodeTimeout(request.paramAsTime("master_timeout", req.masterNodeTimeout())); + req.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> 
client.execute(PutSnapshotLifecycleAction.INSTANCE, req, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java index 4db876149e784..ab41973f640ac 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStartSLMAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class RestStartSLMAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StartSLMAction.Request request = new StartSLMAction.Request(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(StartSLMAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java index 9131dcc15cf78..58c96a64195d5 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/RestStopSLMAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; @ServerlessScope(Scope.INTERNAL) public class 
RestStopSLMAction extends BaseRestHandler { @@ -36,7 +37,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { StopSLMAction.Request request = new StopSLMAction.Request(); request.ackTimeout(restRequest.paramAsTime("timeout", request.ackTimeout())); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(StopSLMAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java index 30792cce5dfb3..7824f9f46c2f6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/rest/action/RestWatchServiceAction.java @@ -18,6 +18,7 @@ import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; public class RestWatchServiceAction extends BaseRestHandler { @@ -55,7 +56,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) { final WatcherServiceRequest request = new WatcherServiceRequest().stop(); - request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + request.masterNodeTimeout(getMasterNodeTimeout(restRequest)); return channel -> client.execute(WatcherServiceAction.INSTANCE, request, new RestToXContentListener<>(channel)); } } diff --git a/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java 
b/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java index bef0bbb1ee3c5..63193b86e3fd1 100644 --- a/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java +++ b/x-pack/qa/freeze-plugin/src/main/java/org/elasticsearch/plugin/freeze/FreezeIndexPlugin.java @@ -35,6 +35,7 @@ import java.util.function.Supplier; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.RestUtils.getMasterNodeTimeout; /** * Restores the REST endpoint for freezing indices so that the JDBC tests can still freeze indices @@ -76,7 +77,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli boolean freeze = request.path().endsWith("/_freeze"); FreezeRequest freezeRequest = new FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); freezeRequest.ackTimeout(request.paramAsTime("timeout", freezeRequest.ackTimeout())); - freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); + freezeRequest.masterNodeTimeout(getMasterNodeTimeout(request)); freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); String waitForActiveShards = request.param("wait_for_active_shards"); if (waitForActiveShards != null) { From 4380cd1bd5892e3dc33d03ea3a8eb8f9c24da881 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 29 Apr 2024 08:48:12 +0100 Subject: [PATCH 004/244] Allow rescorer with field collapsing (#107779) This change adds the support for rescoring collapsed documents. The rescoring is applied on the top document per group on each shard. 
Closes #27243 --- docs/changelog/107779.yaml | 6 + .../collapse-search-results.asciidoc | 66 ++++++++- .../learning-to-rank-search-usage.asciidoc | 6 - rest-api-spec/build.gradle | 1 + .../test/search/110_field_collapsing.yml | 18 --- .../112_field_collapsing_with_rescore.yml | 107 ++++++++++++++ .../search/functionscore/QueryRescorerIT.java | 138 ++++++++++++++++++ .../action/search/SearchRequest.java | 3 - .../elasticsearch/search/SearchService.java | 3 - .../query/QueryPhaseCollectorManager.java | 61 ++++---- .../search/rescore/RescorePhase.java | 44 +++++- .../action/search/SearchRequestTests.java | 12 -- .../search/SearchServiceTests.java | 8 - x-pack/qa/runtime-fields/build.gradle | 1 + 14 files changed, 389 insertions(+), 85 deletions(-) create mode 100644 docs/changelog/107779.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml diff --git a/docs/changelog/107779.yaml b/docs/changelog/107779.yaml new file mode 100644 index 0000000000000..a41c19a2329e0 --- /dev/null +++ b/docs/changelog/107779.yaml @@ -0,0 +1,6 @@ +pr: 107779 +summary: Allow rescorer with field collapsing +area: Search +type: enhancement +issues: + - 27243 \ No newline at end of file diff --git a/docs/reference/search/search-your-data/collapse-search-results.asciidoc b/docs/reference/search/search-your-data/collapse-search-results.asciidoc index ffb6238c89e10..f88fa0d4aca15 100644 --- a/docs/reference/search/search-your-data/collapse-search-results.asciidoc +++ b/docs/reference/search/search-your-data/collapse-search-results.asciidoc @@ -47,7 +47,7 @@ NOTE: Collapsing is applied to the top hits only and does not affect aggregation [[expand-collapse-results]] ==== Expand collapse results -It is also possible to expand each collapsed top hits with the `inner_hits` option. +It is also possible to expand each collapsed top hits with the <> option. 
[source,console] ---- @@ -86,7 +86,7 @@ GET /my-index-000001/_search See <> for the complete list of supported options and the format of the response. -It is also possible to request multiple `inner_hits` for each collapsed hit. This can be useful when you want to get +It is also possible to request multiple <> for each collapsed hit. This can be useful when you want to get multiple representations of the collapsed hits. [source,console] @@ -145,8 +145,7 @@ The `max_concurrent_group_searches` request parameter can be used to control the maximum number of concurrent searches allowed in this phase. The default is based on the number of data nodes and the default search thread pool size. -WARNING: `collapse` cannot be used in conjunction with <> or -<>. +WARNING: `collapse` cannot be used in conjunction with <>. [discrete] [[collapsing-with-search-after]] @@ -175,6 +174,65 @@ GET /my-index-000001/_search ---- // TEST[setup:my_index] +[discrete] +[[rescore-collapse-results]] +==== Rescore collapse results + +You can use field collapsing alongside the <> search parameter. +Rescorers run on every shard for the top-ranked document per collapsed field. +To maintain a reliable order, it is recommended to cluster documents sharing the same collapse +field value on one shard. +This is achieved by assigning the collapse field value as the <> +during indexing: + +[source,console] +---- +POST /my-index-000001/_doc?routing=xyz <1> +{ + "@timestamp": "2099-11-15T13:12:00", + "message": "You know for search!", + "user.id": "xyz" +} +---- +// TEST[setup:my_index] +<1> Assign routing with the collapse field value (`user.id`). + +By doing this, you guarantee that only one top document per +collapse key gets rescored globally. 
+ +The following request utilizes field collapsing on the `user.id` +field and then rescores the top groups with a <>: + +[source,console] +---- +GET /my-index-000001/_search +{ + "query": { + "match": { + "message": "you know for search" + } + }, + "collapse": { + "field": "user.id" + }, + "rescore" : { + "window_size" : 50, + "query" : { + "rescore_query" : { + "match_phrase": { + "message": "you know for search" + } + }, + "query_weight" : 0.3, + "rescore_query_weight" : 1.4 + } + } +} +---- +// TEST[setup:my_index] + +WARNING: Rescorers are not applied to <>. + [discrete] [[second-level-of-collapsing]] ==== Second level of collapsing diff --git a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc index 1d040a116ad9a..2e9693eff0451 100644 --- a/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc +++ b/docs/reference/search/search-your-data/learning-to-rank-search-usage.asciidoc @@ -64,12 +64,6 @@ When exposing pagination to users, `window_size` should remain constant as each Depending on how your model is trained, it’s possible that the model will return negative scores for documents. While negative scores are not allowed from first-stage retrieval and ranking, it is possible to use them in the LTR rescorer. -[discrete] -[[learning-to-rank-rescorer-limitations-field-collapsing]] -====== Compatibility with field collapsing - -LTR rescorers are not compatible with the <>. 
- [discrete] [[learning-to-rank-rescorer-limitations-term-statistics]] ====== Term statistics as features diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 146c78e3c8471..089b7470e9a97 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -83,6 +83,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search/370_profile/fetch source", "profile output has changed") task.skipTest("search/370_profile/fetch nested source", "profile output has changed") task.skipTest("search/240_date_nanos/doc value fields are working as expected across date and date_nanos fields", "Fetching docvalues field multiple times is no longer allowed") + task.skipTest("search/110_field_collapsing/field collapsing and rescore", "#107779 Field collapsing is compatible with rescore in 8.15") task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml index 76207fd76e45b..c10d3c48259f1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -281,24 +281,6 @@ setup: - match: { hits.hits.1.fields.numeric_group: [1] } - match: { hits.hits.1.sort: [1] } ---- -"field collapsing and rescore": - - - do: - catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ - search: - rest_total_hits_as_int: true - index: test - body: - collapse: { field: numeric_group } - rescore: - window_size: 20 - query: - rescore_query: - match_all: {} - query_weight: 1 - rescore_query_weight: 2 - --- "no hits and inner_hits": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml new file mode 100644 index 0000000000000..5048bc8d4307c --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/112_field_collapsing_with_rescore.yml @@ -0,0 +1,107 @@ +setup: + - skip: + version: " - 8.14.99" + reason: Collapse with rescore added in 8.15.0 + - do: + indices.create: + index: products + body: + mappings: + properties: + product_id: { type: keyword } + description: { type: text } + popularity: { type: integer } + + - do: + bulk: + index: products + refresh: true + body: + - '{"index": {"_id": "1", "routing": "0"}}' + - '{"product_id": "0", "description": "flat tv 4K HDR", "score": 2, "popularity": 30}' + - '{"index": {"_id": "2", "routing": "10"}}' + - '{"product_id": "10", "description": "LED Smart TV 32", "score": 5, "popularity": 100}' + - '{"index": {"_id": "3", "routing": "10"}}' + - '{"product_id": "10", "description": "LED Smart TV 65", "score": 10, "popularity": 50}' + - '{"index": {"_id": "4", "routing": "0"}}' + - '{"product_id": "0", "description": "flat tv", "score": 1, "popularity": 10}' + - '{"index": {"_id": "5", "routing": "129"}}' + - '{"product_id": "129", "description": "just a tv", "score": 100, "popularity": 3}' + +--- +"field collapsing and rescore": + - do: + search: + index: products + body: + query: + bool: + filter: + match: + description: "tv" + should: + script_score: + query: { match_all: { } } + script: + source: "doc['score'].value" + collapse: + field: product_id + rescore: + query: + rescore_query: + script_score: + query: { match_all: { } } + script: + source: "doc['popularity'].value" + query_weight: 0 + rescore_query_weight: 1 + + + - match: {hits.total.value: 5 } + - length: {hits.hits: 3 } + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0._score: 50} + - match: {hits.hits.0.fields.product_id: ["10"]} + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._score: 
30 } + - match: { hits.hits.1.fields.product_id: ["0"] } + - match: { hits.hits.2._id: "5" } + - match: { hits.hits.2._score: 3 } + - match: { hits.hits.2.fields.product_id: ["129"] } + +--- +"field collapsing and rescore with window_size": + - do: + search: + index: products + body: + query: + bool: + filter: + match: + description: "tv" + should: + script_score: + query: { match_all: { } } + script: + source: "doc['score'].value" + collapse: + field: product_id + rescore: + window_size: 2 + query: + rescore_query: + script_score: + query: { match_all: { } } + script: + source: "doc['popularity'].value" + query_weight: 0 + rescore_query_weight: 1 + size: 1 + + + - match: {hits.total.value: 5 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "3"} + - match: {hits.hits.0._score: 50} + - match: {hits.hits.0.fields.product_id: ["10"]} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java index 110ac76849e0b..2b61e6ae5d1ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.rescore.QueryRescoreMode; import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.sort.SortBuilders; @@ -30,8 +31,10 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.List; import static 
org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.lucene.search.function.CombineFunction.REPLACE; @@ -845,4 +848,139 @@ public void testRescorePhaseWithInvalidSort() throws Exception { } ); } + + record GroupDoc(String id, String group, float firstPassScore, float secondPassScore, boolean shouldFilter) {} + + public void testRescoreAfterCollapse() throws Exception { + assertAcked(prepareCreate("test").setMapping("group", "type=keyword", "shouldFilter", "type=boolean")); + ensureGreen("test"); + GroupDoc[] groupDocs = new GroupDoc[] { + new GroupDoc("1", "c", 200, 1, false), + new GroupDoc("2", "a", 1, 10, true), + new GroupDoc("3", "b", 2, 30, false), + new GroupDoc("4", "c", 1, 1000, false), + // should be highest on rescore, but filtered out during collapse + new GroupDoc("5", "b", 1, 40, false), + new GroupDoc("6", "a", 2, 20, false) }; + List requests = new ArrayList<>(); + for (var groupDoc : groupDocs) { + requests.add( + client().prepareIndex("test") + .setId(groupDoc.id()) + .setRouting(groupDoc.group()) + .setSource( + "group", + groupDoc.group(), + "firstPassScore", + groupDoc.firstPassScore(), + "secondPassScore", + groupDoc.secondPassScore(), + "shouldFilter", + groupDoc.shouldFilter() + ) + ); + } + indexRandom(true, requests); + + var request = client().prepareSearch("test") + .setQuery(fieldValueScoreQuery("firstPassScore")) + .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore"))) + .setCollapse(new CollapseBuilder("group")); + assertResponse(request, resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(5L)); + assertThat(resp.getHits().getHits().length, equalTo(3)); + + SearchHit hit1 = resp.getHits().getAt(0); + assertThat(hit1.getId(), equalTo("1")); + assertThat(hit1.getScore(), equalTo(201F)); + assertThat(hit1.field("group").getValues().size(), equalTo(1)); + assertThat(hit1.field("group").getValues().get(0), equalTo("c")); + + SearchHit hit2 
= resp.getHits().getAt(1); + assertThat(hit2.getId(), equalTo("3")); + assertThat(hit2.getScore(), equalTo(32F)); + assertThat(hit2.field("group").getValues().size(), equalTo(1)); + assertThat(hit2.field("group").getValues().get(0), equalTo("b")); + + SearchHit hit3 = resp.getHits().getAt(2); + assertThat(hit3.getId(), equalTo("6")); + assertThat(hit3.getScore(), equalTo(22F)); + assertThat(hit3.field("group").getValues().size(), equalTo(1)); + assertThat(hit3.field("group").getValues().get(0), equalTo("a")); + }); + } + + public void testRescoreAfterCollapseRandom() throws Exception { + assertAcked(prepareCreate("test").setMapping("group", "type=keyword", "shouldFilter", "type=boolean")); + ensureGreen("test"); + int numGroups = randomIntBetween(1, 100); + int numDocs = atLeast(100); + GroupDoc[] groups = new GroupDoc[numGroups]; + int numHits = 0; + List requests = new ArrayList<>(); + for (int i = 0; i < numDocs; i++) { + int group = randomIntBetween(0, numGroups - 1); + boolean shouldFilter = rarely(); + String id = randomUUID(); + float firstPassScore = randomFloat(); + float secondPassScore = randomFloat(); + float bestScore = groups[group] == null ? 
-1 : groups[group].firstPassScore; + var groupDoc = new GroupDoc(id, Integer.toString(group), firstPassScore, secondPassScore, shouldFilter); + if (shouldFilter == false) { + if (firstPassScore == bestScore) { + // avoid tiebreaker + continue; + } + + numHits++; + if (firstPassScore > bestScore) { + groups[group] = groupDoc; + } + } + requests.add( + client().prepareIndex("test") + .setId(groupDoc.id()) + .setRouting(groupDoc.group()) + .setSource( + "group", + groupDoc.group(), + "firstPassScore", + groupDoc.firstPassScore(), + "secondPassScore", + groupDoc.secondPassScore(), + "shouldFilter", + groupDoc.shouldFilter() + ) + ); + } + indexRandom(true, requests); + + GroupDoc[] sortedGroups = Arrays.stream(groups) + .filter(g -> g != null) + .sorted(Comparator.comparingDouble(GroupDoc::secondPassScore).reversed()) + .toArray(GroupDoc[]::new); + + var request = client().prepareSearch("test") + .setQuery(fieldValueScoreQuery("firstPassScore")) + .addRescorer(new QueryRescorerBuilder(fieldValueScoreQuery("secondPassScore")).setQueryWeight(0f).windowSize(numGroups)) + .setCollapse(new CollapseBuilder("group")) + .setSize(Math.min(numGroups, 10)); + long expectedNumHits = numHits; + assertResponse(request, resp -> { + assertThat(resp.getHits().getTotalHits().value, equalTo(expectedNumHits)); + for (int pos = 0; pos < resp.getHits().getHits().length; pos++) { + SearchHit hit = resp.getHits().getAt(pos); + assertThat(hit.getId(), equalTo(sortedGroups[pos].id())); + String group = hit.field("group").getValue(); + assertThat(group, equalTo(sortedGroups[pos].group())); + assertThat(hit.getScore(), equalTo(sortedGroups[pos].secondPassScore)); + } + }); + } + + private QueryBuilder fieldValueScoreQuery(String scoreField) { + return functionScoreQuery(termQuery("shouldFilter", false), ScoreFunctionBuilders.fieldValueFactorFunction(scoreField)).boostMode( + CombineFunction.REPLACE + ); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java 
b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 12167c8361513..6a95eadc92139 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -366,9 +366,6 @@ public ActionRequestValidationException validate() { validationException ); } - if (source.collapse() != null && source.rescores() != null && source.rescores().isEmpty() == false) { - validationException = addValidationError("cannot use `collapse` in conjunction with `rescore`", validationException); - } if (source.storedFields() != null) { if (source.storedFields().fetchFields() == false) { if (source.fetchSource() != null && source.fetchSource().fetchSource()) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 753b6f8cb710a..41796967c3870 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1455,9 +1455,6 @@ private static void validateSearchSource(SearchSourceBuilder source, boolean has if (hasScroll) { throw new IllegalArgumentException("cannot use `collapse` in a scroll context"); } - if (source.rescores() != null && source.rescores().isEmpty() == false) { - throw new IllegalArgumentException("cannot use `collapse` in conjunction with `rescore`"); - } } if (source.slice() != null) { if (source.pointInTimeBuilder() == null && (hasScroll == false)) { diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java index 7fd09d3ddfdf1..2286eb2e69f88 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhaseCollectorManager.java @@ -256,21 +256,6 @@ static 
CollectorManager createQueryPhaseCollectorMa searchContext.scrollContext(), searchContext.numberOfShards() ); - } else if (searchContext.collapse() != null) { - boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); - int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); - return forCollapsing( - postFilterWeight, - terminateAfterChecker, - aggsCollectorManager, - searchContext.minimumScore(), - searchContext.getProfilers() != null, - searchContext.collapse(), - searchContext.sort(), - numDocs, - trackScores, - searchContext.searchAfter() - ); } else { int numDocs = Math.min(searchContext.from() + searchContext.size(), totalNumDocs); final boolean rescore = searchContext.rescore().isEmpty() == false; @@ -280,21 +265,37 @@ static CollectorManager createQueryPhaseCollectorMa numDocs = Math.max(numDocs, rescoreContext.getWindowSize()); } } - return new WithHits( - postFilterWeight, - terminateAfterChecker, - aggsCollectorManager, - searchContext.minimumScore(), - searchContext.getProfilers() != null, - reader, - query, - searchContext.sort(), - searchContext.searchAfter(), - numDocs, - searchContext.trackScores(), - searchContext.trackTotalHitsUpTo(), - hasFilterCollector - ); + if (searchContext.collapse() != null) { + boolean trackScores = searchContext.sort() == null || searchContext.trackScores(); + return forCollapsing( + postFilterWeight, + terminateAfterChecker, + aggsCollectorManager, + searchContext.minimumScore(), + searchContext.getProfilers() != null, + searchContext.collapse(), + searchContext.sort(), + numDocs, + trackScores, + searchContext.searchAfter() + ); + } else { + return new WithHits( + postFilterWeight, + terminateAfterChecker, + aggsCollectorManager, + searchContext.minimumScore(), + searchContext.getProfilers() != null, + reader, + query, + searchContext.sort(), + searchContext.searchAfter(), + numDocs, + searchContext.trackScores(), + searchContext.trackTotalHitsUpTo(), + 
hasFilterCollector + ); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java index 81f079b74c18f..697aa6099ca97 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java @@ -8,13 +8,18 @@ package org.elasticsearch.search.rescore; +import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.util.Maps; +import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Map; /** * Rescore phase of a search request, used to run potentially expensive scoring models against the top matching documents. @@ -24,7 +29,7 @@ public class RescorePhase { private RescorePhase() {} public static void execute(SearchContext context) { - if (context.size() == 0 || context.collapse() != null || context.rescore() == null || context.rescore().isEmpty()) { + if (context.size() == 0 || context.rescore() == null || context.rescore().isEmpty()) { return; } @@ -32,6 +37,11 @@ public static void execute(SearchContext context) { if (topDocs.scoreDocs.length == 0) { return; } + TopFieldGroups topGroups = null; + if (topDocs instanceof TopFieldGroups topFieldGroups) { + assert context.collapse() != null; + topGroups = topFieldGroups; + } try { for (RescoreContext ctx : context.rescore()) { topDocs = ctx.rescorer().rescore(topDocs, context.searcher(), ctx); @@ -39,6 +49,15 @@ public static void execute(SearchContext context) { // here we only assert that this condition is met. 
assert context.sort() == null && topDocsSortedByScore(topDocs) : "topdocs should be sorted after rescore"; } + if (topGroups != null) { + assert context.collapse() != null; + /** + * Since rescorers don't preserve collapsing, we must reconstruct the group and field + * values from the originalTopGroups to create a new {@link TopFieldGroups} from the + * rescored top documents. + */ + topDocs = rewriteTopGroups(topGroups, topDocs); + } context.queryResult() .topDocs(new TopDocsAndMaxScore(topDocs, topDocs.scoreDocs[0].score), context.queryResult().sortValueFormats()); } catch (IOException e) { @@ -46,6 +65,29 @@ public static void execute(SearchContext context) { } } + private static TopFieldGroups rewriteTopGroups(TopFieldGroups originalTopGroups, TopDocs rescoredTopDocs) { + assert originalTopGroups.fields.length == 1 && SortField.FIELD_SCORE.equals(originalTopGroups.fields[0]) + : "rescore must always sort by score descending"; + Map docIdToGroupValue = Maps.newMapWithExpectedSize(originalTopGroups.scoreDocs.length); + for (int i = 0; i < originalTopGroups.scoreDocs.length; i++) { + docIdToGroupValue.put(originalTopGroups.scoreDocs[i].doc, originalTopGroups.groupValues[i]); + } + var newScoreDocs = new FieldDoc[rescoredTopDocs.scoreDocs.length]; + var newGroupValues = new Object[originalTopGroups.groupValues.length]; + int pos = 0; + for (var doc : rescoredTopDocs.scoreDocs) { + newScoreDocs[pos] = new FieldDoc(doc.doc, doc.score, new Object[] { doc.score }); + newGroupValues[pos++] = docIdToGroupValue.get(doc.doc); + } + return new TopFieldGroups( + originalTopGroups.field, + originalTopGroups.totalHits, + newScoreDocs, + originalTopGroups.fields, + newGroupValues + ); + } + /** * Returns true if the provided docs are sorted by score. 
*/ diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 9fd2cd1206ee8..95750cf6f412d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.search.AbstractSearchTestCase; @@ -277,17 +276,6 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[slice] can only be used with [scroll] or [point-in-time] requests", validationErrors.validationErrors().get(0)); } - { - // collapse and rescore - SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); - searchRequest.scroll((Scroll) null); - searchRequest.source().collapse(new CollapseBuilder("field")); - searchRequest.source().addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder())); - ActionRequestValidationException validationErrors = searchRequest.validate(); - assertNotNull(validationErrors); - assertEquals(1, validationErrors.validationErrors().size()); - assertEquals("cannot use `collapse` in conjunction with `rescore`", validationErrors.validationErrors().get(0)); - } { // stored fields disabled with _source requested SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder()); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 3c81fd60fe25c..b9053bd5e4078 100644 --- 
a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -100,7 +100,6 @@ import org.elasticsearch.search.query.NonCountingTermQuery; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.tasks.TaskCancelHelper; @@ -2187,13 +2186,6 @@ public void testParseSourceValidation() { service ); } - { - // collapse and rescore - SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); - searchRequest.source().collapse(new CollapseBuilder("field")); - searchRequest.source().addRescorer(new QueryRescorerBuilder(new MatchAllQueryBuilder())); - assertCreateContextValidation(searchRequest, "cannot use `collapse` in conjunction with `rescore`", indexService, service); - } { // stored fields disabled with _source requested SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); diff --git a/x-pack/qa/runtime-fields/build.gradle b/x-pack/qa/runtime-fields/build.gradle index dd7d0abc24b19..0c7d4ee770ee6 100644 --- a/x-pack/qa/runtime-fields/build.gradle +++ b/x-pack/qa/runtime-fields/build.gradle @@ -73,6 +73,7 @@ subprojects { 'aggregations/range/Date range', //source only date field should also emit values for numbers, it expects strings only 'search/115_multiple_field_collapsing/two levels fields collapsing', // Field collapsing on a runtime field does not work 'search/111_field_collapsing_with_max_score/*', // Field collapsing on a runtime field does not work + 'search/112_field_collapsing_with_rescore/*', // Field collapsing on a runtime field does not work 'field_caps/30_index_filter/Field caps with index filter', // We don't support filtering field caps on runtime fields. 
What should we do? 'search/350_point_in_time/point-in-time with index filter', // We don't support filtering pit on runtime fields. 'aggregations/filters_bucket/cache busting', // runtime keyword does not support split_queries_on_whitespace From 68b5336e75c23b1775aecc420523607807dda42e Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:16:07 +0300 Subject: [PATCH 005/244] [TEST] restore synthetic source yaml test (#107991) * [TEST] simplify synthetic source yaml test * [TEST] restore synthetic source yaml test --- .../indices.create/20_synthetic_source.yml | 36 +++++++++++++------ 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 9376f3598d6f1..7e0ad2bf28969 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -39,9 +39,9 @@ nested is disabled: --- object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: @@ -68,25 +68,33 @@ object with unmapped fields: body: - '{ "create": { } }' - '{ "name": "aaaa", "some_string": "AaAa", "some_int": 1000, "some_double": 123.456789, "some_bool": true, "a.very.deeply.nested.field": "AAAA" }' + - '{ "create": { } }' + - '{ "name": "bbbb", "some_string": "BbBb", "some_int": 2000, "some_double": 321.987654, "some_bool": false, "a.very.deeply.nested.field": "BBBB" }' - do: search: index: test + sort: name - - match: { hits.total.value: 1 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: aaaa } - 
match: { hits.hits.0._source.some_string: AaAa } - match: { hits.hits.0._source.some_int: 1000 } - match: { hits.hits.0._source.some_double: 123.456789 } - match: { hits.hits.0._source.a.very.deeply.nested.field: AAAA } - match: { hits.hits.0._source.some_bool: true } + - match: { hits.hits.1._source.name: bbbb } + - match: { hits.hits.1._source.some_string: BbBb } + - match: { hits.hits.1._source.some_int: 2000 } + - match: { hits.hits.1._source.some_double: 321.987654 } + - match: { hits.hits.1._source.a.very.deeply.nested.field: BBBB } --- nested object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: @@ -117,22 +125,28 @@ nested object with unmapped fields: body: - '{ "create": { } }' - '{ "path.to.name": "aaaa", "path.to.surname": "AaAa", "path.some.other.name": "AaAaAa" }' + - '{ "create": { } }' + - '{ "path.to.name": "bbbb", "path.to.surname": "BbBb", "path.some.other.name": "BbBbBb" }' - do: search: index: test + sort: path.to.name - - match: { hits.total.value: 1 } + - match: { hits.total.value: 2 } - match: { hits.hits.0._source.path.to.name: aaaa } - match: { hits.hits.0._source.path.to.surname: AaAa } - match: { hits.hits.0._source.path.some.other.name: AaAaAa } + - match: { hits.hits.1._source.path.to.name: bbbb } + - match: { hits.hits.1._source.path.to.surname: BbBb } + - match: { hits.hits.1._source.path.some.other.name: BbBbBb } --- empty object with unmapped fields: - - skip: - version: " - " - reason: "mapper.track_ignored_source" + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source - do: indices.create: From 1f1ab06572261a4c1b95b51e4fcc7521fb0e6145 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Apr 2024 09:27:59 +0100 Subject: [PATCH 006/244] Fix timeouts which override master-node timeouts (#107992) There's a 
couple of APIs whose `?timeout` parameter also sets the `?master_timeout` timeout unless the latter is specified separately. Today this logic happens within the relevant requests' `timeout()` setters, but it'd be enormously preferable to implement this REST-layer-specific logic in the REST layer instead, and configure the timeouts in the transport-layer requests explicitly. This commit does so. Relates #107984 --- .../org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java | 1 + .../java/org/elasticsearch/index/store/CorruptedFileIT.java | 1 + .../search/basic/SearchWithRandomIOExceptionsIT.java | 6 +++++- .../action/admin/cluster/health/ClusterHealthRequest.java | 3 --- .../node/shutdown/PrevalidateNodeRemovalRequest.java | 3 --- .../rest/action/admin/cluster/RestClusterHealthAction.java | 6 +++++- .../admin/cluster/RestPrevalidateNodeRemovalAction.java | 6 +++++- .../main/java/org/elasticsearch/test/ESIntegTestCase.java | 3 ++- .../java/org/elasticsearch/test/ESSingleNodeTestCase.java | 3 ++- .../test/java/org/elasticsearch/xpack/CcrIntegTestCase.java | 3 ++- 10 files changed, 23 insertions(+), 12 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java index 38921840a2c64..3135647adc9ab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/PrevalidateNodeRemovalIT.java @@ -184,6 +184,7 @@ public void testNodeRemovalFromRedClusterWithTimeout() throws Exception { PrevalidateNodeRemovalRequest req = PrevalidateNodeRemovalRequest.builder() .setNames(node2) .build() + .masterNodeTimeout(TimeValue.timeValueSeconds(1)) .timeout(TimeValue.timeValueSeconds(1)); PrevalidateNodeRemovalResponse resp = client().execute(PrevalidateNodeRemovalAction.INSTANCE, req).get(); assertFalse("prevalidation result 
should return false", resp.getPrevalidation().isSafe()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 58b63eb77d2bd..a9d19473164bf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -181,6 +181,7 @@ public void testCorruptFileAndRecover() throws InterruptedException, IOException ClusterHealthResponse health = clusterAdmin().health( new ClusterHealthRequest("test").waitForGreenStatus() // sometimes due to cluster rebalancing and random settings default timeout is just not enough. + .masterNodeTimeout(TimeValue.timeValueMinutes(5)) .timeout(TimeValue.timeValueMinutes(5)) .waitForNoRelocatingShards(true) ).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 07d976437c24c..d0ff46238c42a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -100,7 +100,11 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc } ClusterHealthResponse clusterHealthResponse = clusterAdmin() // it's OK to timeout here - .health(new ClusterHealthRequest(new String[] {}).waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))) + .health( + new ClusterHealthRequest(new String[] {}).waitForYellowStatus() + .masterNodeTimeout(TimeValue.timeValueSeconds(5)) + .timeout(TimeValue.timeValueSeconds(5)) + ) .get(); final int numDocs; final boolean expectAllShardsFailed; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 7bf0c976d52a5..75313227a6dda 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -121,9 +121,6 @@ public TimeValue timeout() { public ClusterHealthRequest timeout(TimeValue timeout) { this.timeout = timeout; - if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) { - masterNodeTimeout = timeout; - } return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index acffb014715dd..a88fb83b2300d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -94,9 +94,6 @@ public TimeValue timeout() { public PrevalidateNodeRemovalRequest timeout(TimeValue timeout) { this.timeout = timeout; - if (masterNodeTimeout == DEFAULT_MASTER_NODE_TIMEOUT) { - masterNodeTimeout = timeout; - } return this; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java index b849181f62dc0..fe0c005046900 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterHealthAction.java @@ -64,8 +64,12 @@ public static ClusterHealthRequest fromRequest(final RestRequest request) { final ClusterHealthRequest 
clusterHealthRequest = new ClusterHealthRequest(indices); clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions())); clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local())); - clusterHealthRequest.masterNodeTimeout(getMasterNodeTimeout(request)); clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout())); + if (request.hasParam("master_timeout")) { + clusterHealthRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + } else { + clusterHealthRequest.masterNodeTimeout(clusterHealthRequest.timeout()); + } String waitForStatus = request.param("wait_for_status"); if (waitForStatus != null) { clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java index ffcef72d244e9..119f6660f2a33 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPrevalidateNodeRemovalAction.java @@ -44,8 +44,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli .setIds(ids) .setExternalIds(externalIds) .build(); - prevalidationRequest.masterNodeTimeout(getMasterNodeTimeout(request)); prevalidationRequest.timeout(request.paramAsTime("timeout", prevalidationRequest.timeout())); + if (request.hasParam("master_timeout")) { + prevalidationRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + } else { + prevalidationRequest.masterNodeTimeout(prevalidationRequest.timeout()); + } return channel -> client.execute( PrevalidateNodeRemovalAction.INSTANCE, prevalidationRequest, diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 1056c766e17ca..fb6105005201f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -958,7 +958,8 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).timeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 0f3c3dd9b7263..5abca85ac0f42 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -412,7 +412,8 @@ public ClusterHealthStatus ensureGreen(String... indices) { */ public ClusterHealthStatus ensureGreen(TimeValue timeout, String... 
indices) { ClusterHealthResponse actionGet = clusterAdmin().health( - new ClusterHealthRequest(indices).timeout(timeout) + new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForGreenStatus() .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index e67372516688f..52343be3f2c23 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -409,7 +409,8 @@ private ClusterHealthStatus ensureColor( String color = clusterHealthStatus.name().toLowerCase(Locale.ROOT); String method = "ensure" + Strings.capitalize(color); - ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).timeout(timeout) + ClusterHealthRequest healthRequest = new ClusterHealthRequest(indices).masterNodeTimeout(timeout) + .timeout(timeout) .waitForStatus(clusterHealthStatus) .waitForEvents(Priority.LANGUID) .waitForNoRelocatingShards(true) From 42e0b5bac405c8afb4e8ae99105949983fcaf460 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Mon, 29 Apr 2024 10:49:27 +0200 Subject: [PATCH 007/244] ESQL: Fix equals hashCode for functions (#107947) * Take the EsqlConfiguration into account in EsqlConfigurationFunction's equals and hashCode implementations. * Clean redundant implementations of hashCode and equals in descendants of EsqlScalarFunction. 
--- docs/changelog/107947.yaml | 6 ++ .../scalar/EsqlConfigurationFunction.java | 16 +++++ .../function/scalar/UnaryScalarFunction.java | 15 ----- .../expression/function/scalar/math/Log.java | 18 +----- .../expression/function/scalar/math/Pow.java | 22 +------ .../scalar/math/RationalUnaryPredicate.java | 36 ----------- .../function/scalar/math/Round.java | 15 ----- .../function/scalar/multivalue/MvSlice.java | 15 ----- .../function/scalar/multivalue/MvSort.java | 15 ----- .../function/scalar/multivalue/MvZip.java | 15 ----- .../function/scalar/string/Left.java | 7 +-- .../function/scalar/string/Right.java | 7 +-- .../elasticsearch/xpack/esql/CsvTests.java | 4 +- .../xpack/esql/SerializationTestUtils.java | 22 ++++++- ...AbstractConfigurationFunctionTestCase.java | 61 +++++++++++++++++++ .../scalar/date/DateExtractTests.java | 9 +-- .../function/scalar/date/DateFormatTests.java | 10 +-- .../function/scalar/string/ToLowerTests.java | 8 +-- .../function/scalar/string/ToUpperTests.java | 8 +-- .../xpack/esql/planner/EvalMapperTests.java | 2 +- 20 files changed, 131 insertions(+), 180 deletions(-) create mode 100644 docs/changelog/107947.yaml delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RationalUnaryPredicate.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java diff --git a/docs/changelog/107947.yaml b/docs/changelog/107947.yaml new file mode 100644 index 0000000000000..637ac3c005779 --- /dev/null +++ b/docs/changelog/107947.yaml @@ -0,0 +1,6 @@ +pr: 107947 +summary: "ESQL: Fix equals `hashCode` for functions" +area: ES|QL +type: bug +issues: + - 104393 diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java index 0c0ee1e84c2ec..f6dae5bd0117f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/EsqlConfigurationFunction.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ql.tree.Source; import java.util.List; +import java.util.Objects; public abstract class EsqlConfigurationFunction extends EsqlScalarFunction { @@ -25,4 +26,19 @@ protected EsqlConfigurationFunction(Source source, List fields, Conf public Configuration configuration() { return configuration; } + + @Override + public int hashCode() { + return Objects.hash(getClass(), children(), configuration); + } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + EsqlConfigurationFunction other = (EsqlConfigurationFunction) obj; + + return configuration.equals(other.configuration); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java index 806bd9b0a12e1..0a9b4a7b7d0f9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.ql.type.DataType; import java.util.Arrays; -import java.util.Objects; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; @@ -47,18 +46,4 @@ public final Expression field() { public DataType dataType() { return field.dataType(); } - - @Override - public final int hashCode() { - return 
Objects.hash(field); - } - - @Override - public final boolean equals(Object obj) { - if (obj == null || obj.getClass() != getClass()) { - return false; - } - UnaryScalarFunction other = (UnaryScalarFunction) obj; - return Objects.equals(other.field, field); - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java index cf6cfa5525dc6..ffe92c8c19b3f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log.java @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.List; -import java.util.Objects; import java.util.function.Function; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; @@ -31,7 +30,8 @@ public class Log extends EsqlScalarFunction implements OptionalArgument { - private final Expression base, value; + private final Expression base; + private final Expression value; @FunctionInfo( returnType = "double", @@ -127,18 +127,4 @@ public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new LeftEvaluator.Factory( - source, + source(), context -> new BytesRef(), context -> new UnicodeUtil.UTF8CodePoint(), toEvaluator.apply(str), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java index 6469db23e6ddf..07810a7f9baff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Right.java @@ -36,11 +36,7 @@ * {code right(foo, len)} is an alias to 
{code substring(foo, foo.length-len, len)} */ public class Right extends EsqlScalarFunction { - - private final Source source; - private final Expression str; - private final Expression length; @FunctionInfo( @@ -54,7 +50,6 @@ public Right( @Param(name = "length", type = { "integer" }, description = "The number of characters to return.") Expression length ) { super(source, Arrays.asList(str, length)); - this.source = source; this.str = str; this.length = length; } @@ -84,7 +79,7 @@ static BytesRef process( @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { return new RightEvaluator.Factory( - source, + source(), context -> new BytesRef(), context -> new UnicodeUtil.UTF8CodePoint(), toEvaluator.apply(str), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index bfe4cbc6184ea..06a9319079087 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -446,7 +446,7 @@ private Throwable reworkException(Throwable th) { } // Asserts that the serialization and deserialization of the plan creates an equivalent plan. - private static void opportunisticallyAssertPlanSerialization(PhysicalPlan... plans) { + private void opportunisticallyAssertPlanSerialization(PhysicalPlan... plans) { for (var plan : plans) { var tmp = plan; do { @@ -455,7 +455,7 @@ private static void opportunisticallyAssertPlanSerialization(PhysicalPlan... 
pla } } while (tmp.children().isEmpty() == false && (tmp = tmp.children().get(0)) != null); - SerializationTestUtils.assertSerialization(plan); + SerializationTestUtils.assertSerialization(plan, configuration); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index 4be95b95afe54..312250d2f58d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -41,7 +41,16 @@ public class SerializationTestUtils { private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry(); public static void assertSerialization(PhysicalPlan plan) { - var deserPlan = serializeDeserialize(plan, PlanStreamOutput::writePhysicalPlanNode, PlanStreamInput::readPhysicalPlanNode); + assertSerialization(plan, EsqlTestUtils.TEST_CFG); + } + + public static void assertSerialization(PhysicalPlan plan, EsqlConfiguration configuration) { + var deserPlan = serializeDeserialize( + plan, + PlanStreamOutput::writePhysicalPlanNode, + PlanStreamInput::readPhysicalPlanNode, + configuration + ); EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan); } @@ -51,7 +60,16 @@ public static void assertSerialization(LogicalPlan plan) { } public static void assertSerialization(Expression expression) { - Expression deserExpression = serializeDeserialize(expression, PlanStreamOutput::writeExpression, PlanStreamInput::readExpression); + assertSerialization(expression, EsqlTestUtils.TEST_CFG); + } + + public static void assertSerialization(Expression expression, EsqlConfiguration configuration) { + Expression deserExpression = serializeDeserialize( + expression, + PlanStreamOutput::writeExpression, + PlanStreamInput::readExpression, + configuration + ); 
EqualsHashCodeTestUtils.checkEqualsAndHashCode(expression, unused -> deserExpression); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java new file mode 100644 index 0000000000000..56c56870ccbb2 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractConfigurationFunctionTestCase.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.util.StringUtils; + +import java.util.List; + +import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; + +public abstract class AbstractConfigurationFunctionTestCase extends AbstractFunctionTestCase { + protected abstract Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration); + + @Override + protected Expression build(Source source, List args) { + return buildWithConfiguration(source, args, EsqlTestUtils.TEST_CFG); + } + + static EsqlConfiguration randomConfiguration() { + // TODO: Randomize the 
query and maybe the pragmas. + return new EsqlConfiguration( + randomZone(), + randomLocale(random()), + randomBoolean() ? null : randomAlphaOfLength(randomInt(64)), + randomBoolean() ? null : randomAlphaOfLength(randomInt(64)), + QueryPragmas.EMPTY, + EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), + StringUtils.EMPTY, + randomBoolean() + ); + } + + public void testSerializationWithConfiguration() { + EsqlConfiguration config = randomConfiguration(); + Expression expr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), config); + + assertSerialization(expr, config); + + EsqlConfiguration differentConfig; + do { + differentConfig = randomConfiguration(); + } while (config.equals(differentConfig)); + + Expression differentExpr = buildWithConfiguration(testCase.getSource(), testCase.getDataAsFields(), differentConfig); + assertFalse(expr.equals(differentExpr)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index 7a65c8d468644..d862a07c2fd0f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -13,8 +13,9 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; +import 
org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.ql.InvalidArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -31,7 +32,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; -public class DateExtractTests extends AbstractFunctionTestCase { +public class DateExtractTests extends AbstractConfigurationFunctionTestCase { public DateExtractTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -128,7 +129,7 @@ public void testInvalidChrono() { } @Override - protected Expression build(Source source, List args) { - return new DateExtract(source, args.get(0), args.get(1), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new DateExtract(source, args.get(0), args.get(1), configuration); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java index ee4152db2856a..c6c544fced4c4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java @@ -12,9 +12,9 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; 
import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -24,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; -public class DateFormatTests extends AbstractFunctionTestCase { +public class DateFormatTests extends AbstractConfigurationFunctionTestCase { public DateFormatTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -67,7 +67,7 @@ public static Iterable parameters() { } @Override - protected Expression build(Source source, List args) { - return new DateFormat(source, args.get(0), args.get(1), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new DateFormat(source, args.get(0), args.get(1), configuration); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index bd9205c930d51..2e0494723a518 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ 
-32,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ToLowerTests extends AbstractFunctionTestCase { +public class ToLowerTests extends AbstractConfigurationFunctionTestCase { public ToLowerTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -72,8 +72,8 @@ private EsqlConfiguration randomLocaleConfig() { } @Override - protected Expression build(Source source, List args) { - return new ToLower(source, args.get(0), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new ToLower(source, args.get(0), configuration); } private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index ce7c011f201d8..f5d0283d0691b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -14,8 +14,8 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractConfigurationFunctionTestCase; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.equalTo; -public class ToUpperTests extends 
AbstractFunctionTestCase { +public class ToUpperTests extends AbstractConfigurationFunctionTestCase { public ToUpperTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -72,8 +72,8 @@ private EsqlConfiguration randomLocaleConfig() { } @Override - protected Expression build(Source source, List args) { - return new ToUpper(source, args.get(0), EsqlTestUtils.TEST_CFG); + protected Expression buildWithConfiguration(Source source, List args, EsqlConfiguration configuration) { + return new ToUpper(source, args.get(0), configuration); } private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 1d2b11d3deb89..7f8124bec6895 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -152,7 +152,7 @@ public void testEvaluatorSuppliers() { // Test serialization of expressions, since we have convenient access to some expressions. public void testExpressionSerialization() { - SerializationTestUtils.assertSerialization(expression); + SerializationTestUtils.assertSerialization(expression, TEST_CONFIG); } private static FieldAttribute field(String name, DataType type) { From fd3b0cf30cd1593efa871423ce8e56daa91dd4bd Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Mon, 29 Apr 2024 10:51:11 +0200 Subject: [PATCH 008/244] ESQL: Add more tests for constant aggregations (#107888) Add cases of foldable expressions to the existing csv tests for each aggregation that supports constants: one that folds to a number, and one that folds to null. 
--- .../src/main/resources/stats.csv-spec | 169 +++++++++--------- 1 file changed, 84 insertions(+), 85 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 70d6fd6b6d097..3aaace930eed7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -694,8 +694,7 @@ ca:l|gender:s 0 |null ; - -countFieldVsAll#[skip:-8.14.99, reason:Fixed count(null) in 8.15] +countFieldVsAll#[skip:-8.13.99, reason:Fixed count(null) in 8.14] from employees | stats ca = count(), cn = count(null), cf = count(gender) by gender | sort gender; ca:l|cn:l|cf:l|gender:s @@ -1456,172 +1455,172 @@ rows:l 6 ; -countOfConst#[skip:-8.14.99,reason:supported in 8.15] +countOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = count(1), s2point1 = count(2.1), s_mv = count([-1, 0, 3]) * 3, s_null = count(null), rows = count(*) +| STATS s1 = count(1), s2point1 = count(2.1), s_mv = count([-1, 0, 3]) * 3, s_null = count(null), s_expr = count(1+1), s_expr_null = count(1+null), rows = count(*) ; -s1:l | s2point1:l | s_mv:l | s_null:l | rows:l -100 | 100 | 900 | 0 | 100 +s1:l | s2point1:l | s_mv:l | s_null:l | s_expr:l | s_expr_null:l | rows:l +100 | 100 | 900 | 0 | 100 | 0 | 100 ; -countOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +countOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = count("two point one"), s_mv = count([-1, 0, 3]), s_null = count(null), rows = count(*) by languages +| STATS s2point1 = count("two point one"), s_mv = count([-1, 0, 3]), s_null = count(null), s_expr = count(1+1), s_expr_null = count(1+null), rows = count(*) by languages | SORT languages ; -s2point1:l | s_mv:l | s_null:l | rows:l | languages:i -15 | 45 | 0 | 15 | 1 -19 | 57 | 0 | 19 | 2 -17 | 51 | 0 | 17 | 3 -18 | 54 | 0 | 18 | 4 -21 | 63 
| 0 | 21 | 5 -10 | 30 | 0 | 10 | null +s2point1:l | s_mv:l | s_null:l | s_expr:l | s_expr_null:l | rows:l | languages:i +15 | 45 | 0 | 15 | 0 | 15 | 1 +19 | 57 | 0 | 19 | 0 | 19 | 2 +17 | 51 | 0 | 17 | 0 | 17 | 3 +18 | 54 | 0 | 18 | 0 | 18 | 4 +21 | 63 | 0 | 21 | 0 | 21 | 5 +10 | 30 | 0 | 10 | 0 | 10 | null ; sumOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = sum(1), s2point1 = sum(2.1), s_mv = sum([-1, 0, 3]) * 3, s_null = sum(null), rows = count(*) +| STATS s1 = sum(1), s2point1 = sum(2.1), s_mv = sum([-1, 0, 3]) * 3, s_null = sum(null), s_expr = sum(1+1), s_expr_null = sum(1+null), rows = count(*) ; -s1:l | s2point1:d | s_mv:l | s_null:d | rows:l -100 | 210.0 | 600 | null | 100 +s1:l | s2point1:d | s_mv:l | s_null:d | s_expr:l | s_expr_null:l | rows:l +100 | 210.0 | 600 | null | 200 | null | 100 ; sumOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = round(sum(2.1), 1), s_mv = sum([-1, 0, 3]), rows = count(*) by languages +| STATS s2point1 = round(sum(2.1), 1), s_mv = sum([-1, 0, 3]), s_expr = sum(1+1), s_expr_null = sum(1+null), rows = count(*) by languages | SORT languages ; -s2point1:d | s_mv:l | rows:l | languages:i -31.5 | 30 | 15 | 1 -39.9 | 38 | 19 | 2 -35.7 | 34 | 17 | 3 -37.8 | 36 | 18 | 4 -44.1 | 42 | 21 | 5 -21.0 | 20 | 10 | null +s2point1:d | s_mv:l | s_expr:l | s_expr_null:l | rows:l | languages:i +31.5 | 30 | 30 | null | 15 | 1 +39.9 | 38 | 38 | null | 19 | 2 +35.7 | 34 | 34 | null | 17 | 3 +37.8 | 36 | 36 | null | 18 | 4 +44.1 | 42 | 42 | null | 21 | 5 +21.0 | 20 | 20 | null | 10 | null ; avgOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = avg(1), s_mv = avg([-1, 0, 3]) * 3, s_null = avg(null) +| STATS s1 = avg(1), s_mv = avg([-1, 0, 3]) * 3, s_null = avg(null), s_expr = avg(1+1), s_expr_null = avg(1+null) ; -s1:d | s_mv:d | s_null:d -1.0 | 2.0 | null +s1:d | s_mv:d | s_null:d | s_expr:d | s_expr_null:d +1.0 | 2.0 | null | 2.0 | null ; 
avgOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = avg(2.1), s_mv = avg([-1, 0, 3]) * 3 by languages +| STATS s2point1 = avg(2.1), s_mv = avg([-1, 0, 3]) * 3, s_expr = avg(1+1), s_expr_null = avg(1+null) by languages | SORT languages ; -s2point1:d | s_mv:d | languages:i -2.1 | 2.0 | 1 -2.1 | 2.0 | 2 -2.1 | 2.0 | 3 -2.1 | 2.0 | 4 -2.1 | 2.0 | 5 -2.1 | 2.0 | null +s2point1:d | s_mv:d | s_expr:d | s_expr_null:d | languages:i +2.1 | 2.0 | 2.0 | null | 1 +2.1 | 2.0 | 2.0 | null | 2 +2.1 | 2.0 | 2.0 | null | 3 +2.1 | 2.0 | 2.0 | null | 4 +2.1 | 2.0 | 2.0 | null | 5 +2.1 | 2.0 | 2.0 | null | null ; minOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = min(1), s_mv = min([-1, 0, 3]), s_null = min(null) +| STATS s1 = min(1), s_mv = min([-1, 0, 3]), s_null = min(null), s_expr = min(1+1), s_expr_null = min(1+null) ; -s1:i | s_mv:i | s_null:null -1 | -1 | null +s1:i | s_mv:i | s_null:null | s_expr:i | s_expr_null:i +1 | -1 | null | 2 | null ; minOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = min(2.1), s_mv = min([-1, 0, 3]) by languages +| STATS s2point1 = min(2.1), s_mv = min([-1, 0, 3]), s_expr = min(1+1), s_expr_null = min(1+null) by languages | SORT languages ; -s2point1:d | s_mv:i | languages:i -2.1 | -1 | 1 -2.1 | -1 | 2 -2.1 | -1 | 3 -2.1 | -1 | 4 -2.1 | -1 | 5 -2.1 | -1 | null +s2point1:d | s_mv:i | s_expr:i | s_expr_null:i | languages:i +2.1 | -1 | 2 | null | 1 +2.1 | -1 | 2 | null | 2 +2.1 | -1 | 2 | null | 3 +2.1 | -1 | 2 | null | 4 +2.1 | -1 | 2 | null | 5 +2.1 | -1 | 2 | null | null ; maxOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = max(1), s_mv = max([-1, 0, 3]), s_null = max(null) +| STATS s1 = max(1), s_mv = max([-1, 0, 3]), s_null = max(null), s_expr = max(1+1), s_expr_null = max(1+null) ; -s1:i | s_mv:i | s_null:null -1 | 3 | null +s1:i | s_mv:i | s_null:null | s_expr:i | s_expr_null:i +1 | 3 | null | 2 | null ; 
maxOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = max(2.1), s_mv = max([-1, 0, 3]) by languages +| STATS s2point1 = max(2.1), s_mv = max([-1, 0, 3]), s_expr = max(1+1), s_expr_null = max(1+null) by languages | SORT languages ; -s2point1:d | s_mv:i | languages:i -2.1 | 3 | 1 -2.1 | 3 | 2 -2.1 | 3 | 3 -2.1 | 3 | 4 -2.1 | 3 | 5 -2.1 | 3 | null +s2point1:d | s_mv:i | s_expr:i | s_expr_null:i | languages:i +2.1 | 3 | 2 | null | 1 +2.1 | 3 | 2 | null | 2 +2.1 | 3 | 2 | null | 3 +2.1 | 3 | 2 | null | 4 +2.1 | 3 | 2 | null | 5 +2.1 | 3 | 2 | null | null ; -medianOfConst#[skip:-8.14.99,reason:supported in 8.15] +medianOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = median(1), s_mv = median([-1, 0, 1, 3]), s_null = median(null) +| STATS s1 = median(1), s_mv = median([-1, 0, 1, 3]), s_null = median(null), s_expr = median(1+1), s_expr_null = median(1+null) ; -s1:d | s_mv:d | s_null:d -1.0 | 0.5 | null +s1:d | s_mv:d | s_null:d | s_expr:d | s_expr_null:d +1.0 | 0.5 | null | 2.0 | null ; -medianOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +medianOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = median(2.1), s_mv = median([-1, 0, 1, 3]) by languages +| STATS s2point1 = median(2.1), s_mv = median([-1, 0, 1, 3]), s_expr = median(1+1), s_expr_null = median(1+null) by languages | SORT languages ; -s2point1:d | s_mv:d | languages:i -2.1 | 0.5 | 1 -2.1 | 0.5 | 2 -2.1 | 0.5 | 3 -2.1 | 0.5 | 4 -2.1 | 0.5 | 5 -2.1 | 0.5 | null +s2point1:d | s_mv:d | s_expr:d | s_expr_null:d | languages:i +2.1 | 0.5 | 2.0 | null | 1 +2.1 | 0.5 | 2.0 | null | 2 +2.1 | 0.5 | 2.0 | null | 3 +2.1 | 0.5 | 2.0 | null | 4 +2.1 | 0.5 | 2.0 | null | 5 +2.1 | 0.5 | 2.0 | null | null ; -countDistinctOfConst#[skip:-8.14.99,reason:supported in 8.15] +countDistinctOfConst#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s1 = count_distinct(1), s_mv = count_distinct([-1, 0, 3, 1, 
-1, 3]), s_null = count_distinct(null), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 5) +| STATS s1 = count_distinct(1), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_null = count_distinct(null), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 5), s_expr = count_distinct(1+1), s_expr_null = count_distinct(1+null) ; -s1:l | s_mv:l | s_null:l | s_param:l -1 | 4 | 0 | 4 +s1:l | s_mv:l | s_null:l | s_param:l | s_expr:l | s_expr_null:l +1 | 4 | 0 | 4 | 1 | 0 ; -countDistinctOfConstGrouped#[skip:-8.14.99,reason:supported in 8.15] +countDistinctOfConstGrouped#[skip:-8.13.99,reason:supported in 8.14] FROM employees -| STATS s2point1 = count_distinct("two point one"), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 8000) by languages +| STATS s2point1 = count_distinct("two point one"), s_mv = count_distinct([-1, 0, 3, 1, -1, 3]), s_param = count_distinct([-1, 0, 3, 1, -1, 3], 8000), s_expr = count_distinct(1+1), s_expr_null = count_distinct(1+null) by languages | SORT languages ; -s2point1:l | s_mv:l | s_param:l | languages:i -1 | 4 | 4 | 1 -1 | 4 | 4 | 2 -1 | 4 | 4 | 3 -1 | 4 | 4 | 4 -1 | 4 | 4 | 5 -1 | 4 | 4 | null +s2point1:l | s_mv:l | s_param:l | s_expr:l | s_expr_null:l | languages:i +1 | 4 | 4 | 1 | 0 | 1 +1 | 4 | 4 | 1 | 0 | 2 +1 | 4 | 4 | 1 | 0 | 3 +1 | 4 | 4 | 1 | 0 | 4 +1 | 4 | 4 | 1 | 0 | 5 +1 | 4 | 4 | 1 | 0 | null ; evalOverridingKey#[skip:-8.13.1,reason:fixed in 8.13.2] From f53f06ea882fed11ea6bcfb804e47e539a302634 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 29 Apr 2024 09:53:51 +0100 Subject: [PATCH 009/244] Define transport version constant for 8.13 (#107951) --- .../bucket/histogram/InternalAutoDateHistogram.java | 2 +- .../src/main/java/org/elasticsearch/TransportVersions.java | 2 +- .../aggregations/bucket/histogram/InternalDateHistogram.java | 2 +- .../aggregations/bucket/histogram/InternalHistogram.java | 2 +- .../bucket/histogram/InternalVariableWidthHistogram.java | 2 +- 
.../xpack/downsample/DownsampleShardTaskParams.java | 5 ++--- .../openai/embeddings/OpenAiEmbeddingsServiceSettings.java | 4 ++-- .../openai/embeddings/OpenAiEmbeddingsTaskSettings.java | 4 ++-- 8 files changed, 11 insertions(+), 12 deletions(-) diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index 7af912fe3efa2..cfaf4b77a07be 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -230,7 +230,7 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { bucketInnerInterval = 1; // Calculated on merge. } // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingLong(b -> b.key)); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fc4323e418b72..0c9c47eb589a2 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -132,7 +132,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_DIMENSIONS_SET_BY_USER_ADDED = def(8_592_00_0); public static final TransportVersion INDEX_REQUEST_NORMALIZED_BYTES_PARSED = def(8_593_00_0); public static final TransportVersion 
INGEST_GRAPH_STRUCTURE_EXCEPTION = def(8_594_00_0); - public static final TransportVersion ML_MODEL_IN_SERVICE_SETTINGS = def(8_595_00_0); + public static final TransportVersion V_8_13_0 = def(8_595_00_0); // 8.14.0+ public static final TransportVersion RANDOM_AGG_SHARD_SEED = def(8_596_00_0); public static final TransportVersion ESQL_TIMINGS = def(8_597_00_0); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4939c3bc88744..f29850a306b75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -265,7 +265,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { } buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingLong(b -> b.key)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 4ff01c5648486..7b264ccb022e5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -244,7 +244,7 @@ public 
InternalHistogram(StreamInput in) throws IOException { keyed = in.readBoolean(); buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingDouble(b -> b.key)); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 05944b75d06d5..46b5a1b7629d8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -261,7 +261,7 @@ public InternalVariableWidthHistogram(StreamInput in) throws IOException { buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); targetNumBuckets = in.readVInt(); // we changed the order format in 8.13 for partial reduce, therefore we need to order them to perform merge sort - if (in.getTransportVersion().between(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { + if (in.getTransportVersion().between(TransportVersions.V_8_13_0, TransportVersions.HISTOGRAM_AGGS_KEY_SORTED)) { // list is mutable by #readCollectionAsList contract buckets.sort(Comparator.comparingDouble(b -> b.centroid)); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java 
b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 93be79e859f8d..54fd4946df554 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -37,7 +37,6 @@ public record DownsampleShardTaskParams( String[] dimensions ) implements PersistentTaskParams { - private static final TransportVersion V_8_13_0 = TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS; public static final String NAME = DownsampleShardTask.TASK_NAME; private static final ParseField DOWNSAMPLE_CONFIG = new ParseField("downsample_config"); private static final ParseField DOWNSAMPLE_INDEX = new ParseField("rollup_index"); @@ -73,7 +72,7 @@ public record DownsampleShardTaskParams( new ShardId(in), in.readStringArray(), in.readStringArray(), - in.getTransportVersion().onOrAfter(V_8_13_0) ? in.readOptionalStringArray() : new String[] {} + in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0) ? 
in.readOptionalStringArray() : new String[] {} ); } @@ -112,7 +111,7 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); out.writeStringArray(metrics); out.writeStringArray(labels); - if (out.getTransportVersion().onOrAfter(V_8_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeOptionalStringArray(dimensions); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java index 169861381028c..8edbb7bc14f2c 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsServiceSettings.java @@ -182,7 +182,7 @@ public OpenAiEmbeddingsServiceSettings(StreamInput in) throws IOException { } else { dimensionsSetByUser = false; } - if (in.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { modelId = in.readString(); } else { modelId = "unset"; @@ -322,7 +322,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.ML_DIMENSIONS_SET_BY_USER_ADDED)) { out.writeBoolean(dimensionsSetByUser); } - if (out.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeString(modelId); } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java index e306f2d3d2928..b4cf9b27d0ff1 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/openai/embeddings/OpenAiEmbeddingsTaskSettings.java @@ -68,7 +68,7 @@ public OpenAiEmbeddingsTaskSettings(@Nullable String user) { } public OpenAiEmbeddingsTaskSettings(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { this.user = in.readOptionalString(); } else { var discard = in.readString(); @@ -102,7 +102,7 @@ public TransportVersion getMinimalSupportedVersion() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.ML_MODEL_IN_SERVICE_SETTINGS)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { out.writeOptionalString(user); } else { out.writeString("m"); // write any string From 177dc263b3fdd74e4ecf80ed6f34e4cfb11e6697 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 29 Apr 2024 09:58:12 +0100 Subject: [PATCH 010/244] Block specific config files from being accessed after startup (#107481) Some files should never be accessed by ES or plugin code once startup has completed. Use the security manager to block these files from being accessed by anything at all. The current blocked files are elasticsearch.yml, jvm.options, and the jvm.options.d directory. 
--- docs/changelog/107481.yaml | 5 ++ .../test/analysis-common/40_token_filters.yml | 17 +++++ .../bootstrap/ESPolicyUnitTests.java | 38 ++++++++-- .../org/elasticsearch/bootstrap/ESPolicy.java | 58 +++++++++------ .../org/elasticsearch/bootstrap/Security.java | 17 ++++- .../index/analysis/Analysis.java | 10 +-- .../elasticsearch/watcher/FileWatcher.java | 71 ++++++++++++++----- .../bootstrap/BootstrapForTesting.java | 3 +- 8 files changed, 167 insertions(+), 52 deletions(-) create mode 100644 docs/changelog/107481.yaml diff --git a/docs/changelog/107481.yaml b/docs/changelog/107481.yaml new file mode 100644 index 0000000000000..9e65b457c9ed6 --- /dev/null +++ b/docs/changelog/107481.yaml @@ -0,0 +1,5 @@ +pr: 107481 +summary: Block specific config files from being read after startup +area: Security +type: bug +issues: [] diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml index 5e703349a41ec..c8c3b032200b7 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/analysis-common/40_token_filters.yml @@ -662,6 +662,23 @@ - match: { tokens.0.token: baz } - match: { tokens.1.token: baz } +--- +"stemmer_override file access": + - do: + catch: bad_request + indices.create: + index: test + body: + settings: + analysis: + filter: + my_stemmer_override: + type: stemmer_override + rules_path: "jvm.options" + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "/Access.denied.trying.to.read.file.rules_path.*/" } + --- "decompounder": - do: diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java 
index bd0418abc27a8..bd26146f92c0d 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/ESPolicyUnitTests.java @@ -47,22 +47,20 @@ public void testNullCodeSource() throws Exception { Permission all = new AllPermission(); PermissionCollection allCollection = all.newPermissionCollection(); allCollection.add(all); - ESPolicy policy = new ESPolicy(TEST_CODEBASES, allCollection, Collections.emptyMap(), true, List.of()); + ESPolicy policy = new ESPolicy(TEST_CODEBASES, allCollection, Collections.emptyMap(), true, List.of(), List.of()); // restrict ourselves to NoPermission PermissionCollection noPermissions = new Permissions(); assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission("foo", "read"))); } /** - * test with null location - *

- * its unclear when/if this happens, see https://bugs.openjdk.java.net/browse/JDK-8129972 + * As of JDK 9, {@link CodeSource#getLocation} is documented to potentially return {@code null} */ @SuppressForbidden(reason = "to create FilePermission object") public void testNullLocation() throws Exception { assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); PermissionCollection noPermissions = new Permissions(); - ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of()); + ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of(), List.of()); assertFalse( policy.implies( new ProtectionDomain(new CodeSource(null, (Certificate[]) null), noPermissions), @@ -74,7 +72,7 @@ public void testNullLocation() throws Exception { public void testListen() { assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); final PermissionCollection noPermissions = new Permissions(); - final ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of()); + final ESPolicy policy = new ESPolicy(TEST_CODEBASES, noPermissions, Collections.emptyMap(), true, List.of(), List.of()); assertFalse( policy.implies( new ProtectionDomain(ESPolicyUnitTests.class.getProtectionDomain().getCodeSource(), noPermissions), @@ -91,7 +89,8 @@ public void testDataPathPermissionIsChecked() { new Permissions(), Collections.emptyMap(), true, - List.of(new FilePermission("/home/elasticsearch/data/-", "read")) + List.of(new FilePermission("/home/elasticsearch/data/-", "read")), + List.of() ); assertTrue( policy.implies( @@ -100,4 +99,29 @@ public void testDataPathPermissionIsChecked() { ) ); } + + @SuppressForbidden(reason = "to create FilePermission object") + public void testForbiddenFilesAreForbidden() { + assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); + + FilePermission 
configPerm = new FilePermission("/home/elasticsearch/config/-", "read"); + PermissionCollection coll = configPerm.newPermissionCollection(); + coll.add(configPerm); + + ESPolicy policy = new ESPolicy( + TEST_CODEBASES, + coll, + Collections.emptyMap(), + true, + List.of(), + List.of(new FilePermission("/home/elasticsearch/config/forbidden.yml", "read")) + ); + ProtectionDomain pd = new ProtectionDomain( + new CodeSource(randomBoolean() ? null : randomFrom(TEST_CODEBASES.values()), (Certificate[]) null), + new Permissions() + ); + + assertTrue(policy.implies(pd, new FilePermission("/home/elasticsearch/config/config.yml", "read"))); + assertFalse(policy.implies(pd, new FilePermission("/home/elasticsearch/config/forbidden.yml", "read"))); + } } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index e8244fcd576ff..d349403505311 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -39,6 +39,7 @@ final class ESPolicy extends Policy { final Policy system; final PermissionCollection dynamic; final PermissionCollection dataPathPermission; + final PermissionCollection forbiddenFilePermission; final Map plugins; ESPolicy( @@ -46,18 +47,12 @@ final class ESPolicy extends Policy { PermissionCollection dynamic, Map plugins, boolean filterBadDefaults, - List dataPathPermissions + List dataPathPermissions, + List forbiddenFilePermissions ) { this.template = PolicyUtil.readPolicy(getClass().getResource(POLICY_RESOURCE), codebases); - PermissionCollection dpPermissions = null; - for (FilePermission permission : dataPathPermissions) { - if (dpPermissions == null) { - dpPermissions = permission.newPermissionCollection(); - } - dpPermissions.add(permission); - } - this.dataPathPermission = dpPermissions == null ? 
new Permissions() : dpPermissions; - this.dataPathPermission.setReadOnly(); + this.dataPathPermission = createPermission(dataPathPermissions); + this.forbiddenFilePermission = createPermission(forbiddenFilePermissions); this.untrusted = PolicyUtil.readPolicy(getClass().getResource(UNTRUSTED_RESOURCE), Collections.emptyMap()); if (filterBadDefaults) { this.system = new SystemPolicy(Policy.getPolicy()); @@ -68,6 +63,21 @@ final class ESPolicy extends Policy { this.plugins = plugins; } + private static PermissionCollection createPermission(List permissions) { + PermissionCollection coll = null; + for (FilePermission permission : permissions) { + if (coll == null) { + coll = permission.newPermissionCollection(); + } + coll.add(permission); + } + if (coll == null) { + coll = new Permissions(); + } + coll.setReadOnly(); + return coll; + } + @Override @SuppressForbidden(reason = "fast equals check is desired") public boolean implies(ProtectionDomain domain, Permission permission) { @@ -77,9 +87,12 @@ public boolean implies(ProtectionDomain domain, Permission permission) { return false; } + // completely deny access to specific files that are forbidden + if (forbiddenFilePermission.implies(permission)) { + return false; + } + URL location = codeSource.getLocation(); - // location can be null... ??? nobody knows - // https://bugs.openjdk.java.net/browse/JDK-8129972 if (location != null) { // run scripts with limited permissions if (BootstrapInfo.UNTRUSTED_CODEBASE.equals(location.getFile())) { @@ -93,17 +106,16 @@ public boolean implies(ProtectionDomain domain, Permission permission) { } } - if (permission instanceof FilePermission) { - // The FilePermission to check access to the path.data is the hottest permission check in - // Elasticsearch, so we check it first. 
- if (dataPathPermission.implies(permission)) { - return true; - } - // Special handling for broken Hadoop code: "let me execute or my classes will not load" - // yeah right, REMOVE THIS when hadoop is fixed - if ("<>".equals(permission.getName())) { - hadoopHack(); - } + // The FilePermission to check access to the path.data is the hottest permission check in + // Elasticsearch, so we explicitly check it here. + if (dataPathPermission.implies(permission)) { + return true; + } + + // Special handling for broken Hadoop code: "let me execute or my classes will not load" + // yeah right, REMOVE THIS when hadoop is fixed + if (permission instanceof FilePermission && "<>".equals(permission.getName())) { + hadoopHack(); } // otherwise defer to template + dynamic file permissions diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Security.java b/server/src/main/java/org/elasticsearch/bootstrap/Security.java index eef7228bb4812..1c37b3492c4cb 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Security.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -125,7 +125,8 @@ static void configure(Environment environment, boolean filterBadDefaults, Path p createPermissions(environment, pidFile), getPluginAndModulePermissions(environment), filterBadDefaults, - createRecursiveDataPathPermission(environment) + createRecursiveDataPathPermission(environment), + createForbiddenFilePermissions(environment) ) ); @@ -188,6 +189,18 @@ private static List createRecursiveDataPathPermission(Environmen return toFilePermissions(policy); } + private static List createForbiddenFilePermissions(Environment environment) throws IOException { + Permissions policy = new Permissions(); + addSingleFilePath(policy, environment.configFile().resolve("elasticsearch.yml"), "read,readlink,write,delete,execute"); + addSingleFilePath(policy, environment.configFile().resolve("jvm.options"), "read,readlink,write,delete,execute"); + Path jvmOptionsD = 
environment.configFile().resolve("jvm.options.d"); + if (Files.isDirectory(jvmOptionsD)) { + // we don't want to create this if it doesn't exist + addDirectoryPath(policy, "forbidden_access", jvmOptionsD, "read,readlink,write,delete,execute", false); + } + return toFilePermissions(policy); + } + /** Adds access to classpath jars/classes for jar hell scan, etc */ @SuppressForbidden(reason = "accesses fully qualified URLs to configure security") static void addClasspathPermissions(Permissions policy) throws IOException { @@ -219,6 +232,7 @@ static void addFilePermissions(Permissions policy, Environment environment, Path addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsFile(), "read,readlink", false); addDirectoryPath(policy, "path.conf", environment.configFile(), "read,readlink", false); + // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete", false); addDirectoryPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete", false); @@ -251,6 +265,7 @@ static void addFilePermissions(Permissions policy, Environment environment, Path for (Path path : environment.repoFiles()) { addDirectoryPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete", false); } + if (pidFile != null) { // we just need permission to remove the file if its elsewhere. 
addSingleFilePath(policy, pidFile, "delete"); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index d3e281ca115e1..e4b86876c99d3 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -63,6 +63,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessControlException; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; @@ -246,16 +247,17 @@ public static List getWordList( try { return loadWordList(path, removeComments); } catch (CharacterCodingException ex) { - String message = String.format( - Locale.ROOT, + String message = Strings.format( "Unsupported character encoding detected while reading %s: %s - files must be UTF-8 encoded", settingPath, - path.toString() + path ); throw new IllegalArgumentException(message, ex); } catch (IOException ioe) { - String message = String.format(Locale.ROOT, "IOException while reading %s: %s", settingPath, path.toString()); + String message = Strings.format("IOException while reading %s: %s", settingPath, path); throw new IllegalArgumentException(message, ioe); + } catch (AccessControlException ace) { + throw new IllegalArgumentException(Strings.format("Access denied trying to read file %s: %s", settingPath, path), ace); } } diff --git a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java index bc26165432817..3f9cd42504cd5 100644 --- a/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java +++ b/server/src/main/java/org/elasticsearch/watcher/FileWatcher.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.util.CollectionUtils; @@ -17,6 +18,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.attribute.BasicFileAttributes; +import java.security.AccessControlException; import java.util.Arrays; /** @@ -75,20 +77,51 @@ protected void doCheckAndNotify() throws IOException { rootFileObserver.checkAndNotify(); } - private static final FileObserver[] EMPTY_DIRECTORY = new FileObserver[0]; + private static final Observer[] EMPTY_DIRECTORY = new Observer[0]; - private class FileObserver { - private final Path path; + private abstract static class Observer { + final Path path; + boolean exists; + boolean isDirectory; - private boolean exists; + private Observer(Path path) { + this.path = path; + } + + abstract void checkAndNotify() throws IOException; + + abstract void onDirectoryDeleted(); + + abstract void onFileDeleted(); + } + + /** + * A placeholder {@link Observer} for a file that we don't have permissions to access. + * We can't watch it for changes, but it shouldn't block us from watching other files in the same directory. 
+ */ + private static class DeniedObserver extends Observer { + private DeniedObserver(Path path) { + super(path); + } + + @Override + void checkAndNotify() throws IOException {} + + @Override + void onDirectoryDeleted() {} + + @Override + void onFileDeleted() {} + } + + private class FileObserver extends Observer { private long length; private long lastModified; - private boolean isDirectory; - private FileObserver[] children; + private Observer[] children; private byte[] digest; FileObserver(Path path) { - this.path = path; + super(path); } public void checkAndNotify() throws IOException { @@ -199,10 +232,16 @@ private void init(boolean initial) throws IOException { } } - private FileObserver createChild(Path file, boolean initial) throws IOException { - FileObserver child = new FileObserver(file); - child.init(initial); - return child; + private Observer createChild(Path file, boolean initial) throws IOException { + try { + FileObserver child = new FileObserver(file); + child.init(initial); + return child; + } catch (AccessControlException e) { + // don't have permissions, use a placeholder + logger.debug(() -> Strings.format("Don't have permissions to watch path [%s]", file), e); + return new DeniedObserver(file); + } } private Path[] listFiles() throws IOException { @@ -211,10 +250,10 @@ private Path[] listFiles() throws IOException { return files; } - private FileObserver[] listChildren(boolean initial) throws IOException { + private Observer[] listChildren(boolean initial) throws IOException { Path[] files = listFiles(); if (CollectionUtils.isEmpty(files) == false) { - FileObserver[] childObservers = new FileObserver[files.length]; + Observer[] childObservers = new Observer[files.length]; for (int i = 0; i < files.length; i++) { childObservers[i] = createChild(files[i], initial); } @@ -227,7 +266,7 @@ private FileObserver[] listChildren(boolean initial) throws IOException { private void updateChildren() throws IOException { Path[] files = listFiles(); if 
(CollectionUtils.isEmpty(files) == false) { - FileObserver[] newChildren = new FileObserver[files.length]; + Observer[] newChildren = new Observer[files.length]; int child = 0; int file = 0; while (file < files.length || child < children.length) { @@ -294,7 +333,7 @@ private void onFileCreated(boolean initial) { } } - private void onFileDeleted() { + void onFileDeleted() { for (FileChangesListener listener : listeners()) { try { listener.onFileDeleted(path); @@ -330,7 +369,7 @@ private void onDirectoryCreated(boolean initial) throws IOException { children = listChildren(initial); } - private void onDirectoryDeleted() { + void onDirectoryDeleted() { // First delete all children for (int child = 0; child < children.length; child++) { deleteChild(child); diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java index a8acabaa2914d..3aed133c590f7 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java @@ -174,7 +174,8 @@ public class BootstrapForTesting { perms, getPluginPermissions(), true, - Security.toFilePermissions(fastPathPermissions) + Security.toFilePermissions(fastPathPermissions), + List.of() ); Policy.setPolicy(new Policy() { @Override From 9242e012c1e210df4b3a18ce5a6bba1afeac1fe0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Apr 2024 10:41:01 +0100 Subject: [PATCH 011/244] Intern common `TimeValue` constants (#107985) The values `30s` and `1m` are used as defaults in various places in ES, there's no need to create a new `TimeValue` instance each time they appear. Moreover we already have constants for `0` and `-1`, but we don't use these constants when reading the values off the wire. 
This commit adds constants for `30s` and `1m` and adjusts the deserialization code to avoid unnecessary allocation for common `TimeValue` instances. Relates #107984 --- .../org/elasticsearch/core/TimeValue.java | 32 ++++++++++++++----- .../common/unit/TimeValueTests.java | 12 +++++++ .../cluster/health/ClusterHealthRequest.java | 3 +- .../replication/ReplicationRequest.java | 3 +- .../InstanceShardOperationRequest.java | 3 +- .../common/io/stream/StreamInput.java | 12 +++++-- .../common/io/stream/BytesStreamsTests.java | 16 ++++++++++ 7 files changed, 64 insertions(+), 17 deletions(-) diff --git a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java index 30883ef3af731..df7c47943289d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java +++ b/libs/core/src/main/java/org/elasticsearch/core/TimeValue.java @@ -17,9 +17,11 @@ public class TimeValue implements Comparable { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); - public static final TimeValue MINUS_ONE = timeValueMillis(-1); - public static final TimeValue ZERO = timeValueMillis(0); - public static final TimeValue MAX_VALUE = TimeValue.timeValueNanos(Long.MAX_VALUE); + public static final TimeValue MINUS_ONE = new TimeValue(-1, TimeUnit.MILLISECONDS); + public static final TimeValue ZERO = new TimeValue(0, TimeUnit.MILLISECONDS); + public static final TimeValue MAX_VALUE = new TimeValue(Long.MAX_VALUE, TimeUnit.NANOSECONDS); + public static final TimeValue THIRTY_SECONDS = new TimeValue(30, TimeUnit.SECONDS); + public static final TimeValue ONE_MINUTE = new TimeValue(1, TimeUnit.MINUTES); private static final long C0 = 1L; private static final long C1 = C0 * 1000L; @@ -49,14 +51,28 @@ public static TimeValue timeValueNanos(long nanos) { } public static TimeValue timeValueMillis(long millis) { + if (millis == 0) { + return 
ZERO; + } + if (millis == -1) { + return MINUS_ONE; + } return new TimeValue(millis, TimeUnit.MILLISECONDS); } public static TimeValue timeValueSeconds(long seconds) { + if (seconds == 30) { + // common value, no need to allocate each time + return THIRTY_SECONDS; + } return new TimeValue(seconds, TimeUnit.SECONDS); } public static TimeValue timeValueMinutes(long minutes) { + if (minutes == 1) { + // common value, no need to allocate each time + return ONE_MINUTE; + } return new TimeValue(minutes, TimeUnit.MINUTES); } @@ -355,18 +371,18 @@ public static TimeValue parseTimeValue(@Nullable String sValue, TimeValue defaul } final String normalized = sValue.toLowerCase(Locale.ROOT).trim(); if (normalized.endsWith("nanos")) { - return new TimeValue(parse(sValue, normalized, "nanos", settingName), TimeUnit.NANOSECONDS); + return TimeValue.timeValueNanos(parse(sValue, normalized, "nanos", settingName)); } else if (normalized.endsWith("micros")) { return new TimeValue(parse(sValue, normalized, "micros", settingName), TimeUnit.MICROSECONDS); } else if (normalized.endsWith("ms")) { - return new TimeValue(parse(sValue, normalized, "ms", settingName), TimeUnit.MILLISECONDS); + return TimeValue.timeValueMillis(parse(sValue, normalized, "ms", settingName)); } else if (normalized.endsWith("s")) { - return new TimeValue(parse(sValue, normalized, "s", settingName), TimeUnit.SECONDS); + return TimeValue.timeValueSeconds(parse(sValue, normalized, "s", settingName)); } else if (sValue.endsWith("m")) { // parsing minutes should be case-sensitive as 'M' means "months", not "minutes"; this is the only special case. 
- return new TimeValue(parse(sValue, normalized, "m", settingName), TimeUnit.MINUTES); + return TimeValue.timeValueMinutes(parse(sValue, normalized, "m", settingName)); } else if (normalized.endsWith("h")) { - return new TimeValue(parse(sValue, normalized, "h", settingName), TimeUnit.HOURS); + return TimeValue.timeValueHours(parse(sValue, normalized, "h", settingName)); } else if (normalized.endsWith("d")) { return new TimeValue(parse(sValue, normalized, "d", settingName), TimeUnit.DAYS); } else if (normalized.matches("-0*1")) { diff --git a/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index a021299aaa06d..b6481db9b9951 100644 --- a/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -242,4 +242,16 @@ private TimeUnit randomTimeUnitObject() { TimeUnit.DAYS ); } + + public void testInternedValues() { + assertSame(TimeValue.timeValueMillis(-1), TimeValue.MINUS_ONE); + assertSame(TimeValue.timeValueMillis(0), TimeValue.ZERO); + assertSame(TimeValue.timeValueSeconds(30), TimeValue.THIRTY_SECONDS); + assertSame(TimeValue.timeValueMinutes(1), TimeValue.ONE_MINUTE); + + assertSame(TimeValue.parseTimeValue("-1", getTestName()), TimeValue.MINUS_ONE); + assertSame(TimeValue.parseTimeValue("0", getTestName()), TimeValue.ZERO); + assertSame(TimeValue.parseTimeValue("30s", getTestName()), TimeValue.THIRTY_SECONDS); + assertSame(TimeValue.parseTimeValue("1m", getTestName()), TimeValue.ONE_MINUTE); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 75313227a6dda..a94555f1dfd1c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -24,13 +24,12 @@ import java.io.IOException; import java.util.Map; -import java.util.concurrent.TimeUnit; public class ClusterHealthRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.lenientExpandHidden(); - private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS); + private TimeValue timeout = TimeValue.timeValueSeconds(30); private ClusterHealthStatus waitForStatus; private boolean waitForNoRelocatingShards = false; private boolean waitForNoInitializingShards = false; diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 1da69d76ebc82..8d388e7c6d4d6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.Map; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -35,7 +34,7 @@ */ public abstract class ReplicationRequest> extends ActionRequest implements IndicesRequest { - public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); + public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMinutes(1); /** * Target shard the request should execute on. 
In case of index and delete requests, diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java index e689492523838..51952059d7d94 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/InstanceShardOperationRequest.java @@ -20,7 +20,6 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.concurrent.TimeUnit; // TODO: This request and its associated transport action can be folded into UpdateRequest which is its only concrete production code // implementation @@ -28,7 +27,7 @@ public abstract class InstanceShardOperationRequest TimeValue.timeValueMillis(duration); + case SECONDS -> TimeValue.timeValueSeconds(duration); + case MINUTES -> TimeValue.timeValueMinutes(duration); + default -> new TimeValue(duration, timeUnit); + }; } /** diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index f60a5a5fc601a..7799c1ff5a34c 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -902,6 +902,22 @@ public void testTimeValueSerialize() throws Exception { assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length()); } + public void testTimeValueInterning() throws IOException { + try (var bytesOut = new BytesStreamOutput()) { + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.MINUS_ONE : new TimeValue(-1, TimeUnit.MILLISECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.ZERO : new TimeValue(0, TimeUnit.MILLISECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? 
TimeValue.THIRTY_SECONDS : new TimeValue(30, TimeUnit.SECONDS)); + bytesOut.writeTimeValue(randomBoolean() ? TimeValue.ONE_MINUTE : new TimeValue(1, TimeUnit.MINUTES)); + + try (var in = bytesOut.bytes().streamInput()) { + assertSame(TimeValue.MINUS_ONE, in.readTimeValue()); + assertSame(TimeValue.ZERO, in.readTimeValue()); + assertSame(TimeValue.THIRTY_SECONDS, in.readTimeValue()); + assertSame(TimeValue.ONE_MINUTE, in.readTimeValue()); + } + } + } + private static class TestStreamOutput extends BytesStream { private final BytesStreamOutput output = new BytesStreamOutput(); From 85942902638eae1a7288e5d25aeb8fbb938b30ee Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 29 Apr 2024 11:45:12 +0200 Subject: [PATCH 012/244] [Profiling] Add API update reminder for V9 (#107996) With this commit we add a reminder annotation to change the profiling flamegraph API response from Camel Case to Snake Case (which aligns this API casing with other Elasticsearch APIs). --- .../xpack/profiling/action/GetFlamegraphResponse.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java index e4ea3c1521d22..f93223d0e3e49 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetFlamegraphResponse.java @@ -173,6 +173,7 @@ public long getTotalSamples() { return totalSamples; } + @UpdateForV9 // change casing from Camel Case to Snake Case (requires updates in Kibana as well) @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( From 6ef486519549f37be893b39b7248b49b9518a9d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 29 Apr 2024 12:01:41 +0200 Subject: 
[PATCH 013/244] Add functionality to test if the host CPU supports native SIMD instructions (#107429) --- libs/native/libraries/build.gradle | 2 +- .../nativeaccess/PosixNativeAccess.java | 2 +- .../VectorSimilarityFunctions.java | 21 +- .../nativeaccess/lib/VectorLibrary.java | 9 +- .../nativeaccess/jdk/JdkVectorLibrary.java | 243 ++++++++++-------- libs/vec/native/publish_vec_binaries.sh | 2 +- libs/vec/native/src/vec/c/vec.c | 28 ++ libs/vec/native/src/vec/headers/vec.h | 8 + 8 files changed, 181 insertions(+), 134 deletions(-) diff --git a/libs/native/libraries/build.gradle b/libs/native/libraries/build.gradle index 73c2c6fe14ba6..e072359620748 100644 --- a/libs/native/libraries/build.gradle +++ b/libs/native/libraries/build.gradle @@ -18,7 +18,7 @@ configurations { } var zstdVersion = "1.5.5" -var vecVersion = "1.0.1" +var vecVersion = "1.0.3" repositories { exclusiveContent { diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java index 993c9d2a874b6..56017d3a8a20a 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/PosixNativeAccess.java @@ -27,7 +27,7 @@ abstract class PosixNativeAccess extends AbstractNativeAccess { static VectorSimilarityFunctions vectorSimilarityFunctionsOrNull(NativeLibraryProvider libraryProvider) { if (isNativeVectorLibSupported()) { - var lib = new VectorSimilarityFunctions(libraryProvider.getLibrary(VectorLibrary.class)); + var lib = libraryProvider.getLibrary(VectorLibrary.class).getVectorSimilarityFunctions(); logger.info("Using native vector library; to disable start with -D" + ENABLE_JDK_VECTOR_LIBRARY + "=false"); return lib; } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java 
b/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java index 7cb852ccf7876..6b8f6048fe058 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/VectorSimilarityFunctions.java @@ -8,25 +8,16 @@ package org.elasticsearch.nativeaccess; -import org.elasticsearch.nativeaccess.lib.VectorLibrary; - import java.lang.invoke.MethodHandle; /** - * Utility class providing vector similarity functions. + * Utility interface providing vector similarity functions. * *

MethodHandles are returned to avoid a static reference to MemorySegment, * which is not in the currently lowest compile version, JDK 17. Code consuming * the method handles will, by definition, require access to MemorySegment. */ -public final class VectorSimilarityFunctions implements VectorLibrary { - - private final VectorLibrary vectorLibrary; - - VectorSimilarityFunctions(VectorLibrary vectorLibrary) { - this.vectorLibrary = vectorLibrary; - } - +public interface VectorSimilarityFunctions { /** * Produces a method handle returning the dot product of byte (signed int8) vectors. * @@ -34,9 +25,7 @@ public final class VectorSimilarityFunctions implements VectorLibrary { * its first and second arguments will be {@code MemorySegment}, whose contents is the * vector data bytes. The third argument is the length of the vector data. */ - public MethodHandle dotProductHandle() { - return vectorLibrary.dotProductHandle(); - } + MethodHandle dotProductHandle(); /** * Produces a method handle returning the square distance of byte (signed int8) vectors. @@ -45,7 +34,5 @@ public MethodHandle dotProductHandle() { * its first and second arguments will be {@code MemorySegment}, whose contents is the * vector data bytes. The third argument is the length of the vector data. 
*/ - public MethodHandle squareDistanceHandle() { - return vectorLibrary.squareDistanceHandle(); - } + MethodHandle squareDistanceHandle(); } diff --git a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java index a11533c29bebc..86d1a82b2bdc9 100644 --- a/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java +++ b/libs/native/src/main/java/org/elasticsearch/nativeaccess/lib/VectorLibrary.java @@ -8,7 +8,8 @@ package org.elasticsearch.nativeaccess.lib; -import java.lang.invoke.MethodHandle; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; /** * A VectorLibrary is just an adaptation of the factory for a NativeLibrary. @@ -16,8 +17,6 @@ * for native implementations. */ public non-sealed interface VectorLibrary extends NativeLibrary { - - MethodHandle dotProductHandle(); - - MethodHandle squareDistanceHandle(); + @Nullable + VectorSimilarityFunctions getVectorSimilarityFunctions(); } diff --git a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java index d4ab57396e290..b988c9730fd1b 100644 --- a/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java +++ b/libs/native/src/main21/java/org/elasticsearch/nativeaccess/jdk/JdkVectorLibrary.java @@ -8,6 +8,7 @@ package org.elasticsearch.nativeaccess.jdk; +import org.elasticsearch.nativeaccess.VectorSimilarityFunctions; import org.elasticsearch.nativeaccess.lib.VectorLibrary; import java.lang.foreign.FunctionDescriptor; @@ -23,142 +24,166 @@ public final class JdkVectorLibrary implements VectorLibrary { + static final VectorSimilarityFunctions INSTANCE; + static { System.loadLibrary("vec"); + final MethodHandle vecCaps$mh = downcallHandle("vec_caps", FunctionDescriptor.of(JAVA_INT)); + + try 
{ + int caps = (int) vecCaps$mh.invokeExact(); + if (caps != 0) { + INSTANCE = new JdkVectorSimilarityFunctions(); + } else { + INSTANCE = null; + } + } catch (Throwable t) { + throw new AssertionError(t); + } } public JdkVectorLibrary() {} - static final MethodHandle dot8stride$mh = downcallHandle("dot8s_stride", FunctionDescriptor.of(JAVA_INT)); - static final MethodHandle sqr8stride$mh = downcallHandle("sqr8s_stride", FunctionDescriptor.of(JAVA_INT)); + @Override + public VectorSimilarityFunctions getVectorSimilarityFunctions() { + return INSTANCE; + } - static final MethodHandle dot8s$mh = downcallHandle("dot8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); - static final MethodHandle sqr8s$mh = downcallHandle("sqr8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); + private static final class JdkVectorSimilarityFunctions implements VectorSimilarityFunctions { - // Stride of the native implementation - consumes this number of bytes per loop invocation. 
- // There must be at least this number of bytes/elements available when going native - static final int DOT_STRIDE = 32; - static final int SQR_STRIDE = 16; + static final MethodHandle dot8stride$mh = downcallHandle("dot8s_stride", FunctionDescriptor.of(JAVA_INT)); + static final MethodHandle sqr8stride$mh = downcallHandle("sqr8s_stride", FunctionDescriptor.of(JAVA_INT)); - static { - assert DOT_STRIDE > 0 && (DOT_STRIDE & (DOT_STRIDE - 1)) == 0 : "Not a power of two"; - assert dot8Stride() == DOT_STRIDE : dot8Stride() + " != " + DOT_STRIDE; - assert SQR_STRIDE > 0 && (SQR_STRIDE & (SQR_STRIDE - 1)) == 0 : "Not a power of two"; - assert sqr8Stride() == SQR_STRIDE : sqr8Stride() + " != " + SQR_STRIDE; - } + static final MethodHandle dot8s$mh = downcallHandle("dot8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); + static final MethodHandle sqr8s$mh = downcallHandle("sqr8s", FunctionDescriptor.of(JAVA_INT, ADDRESS, ADDRESS, JAVA_INT)); - /** - * Computes the dot product of given byte vectors. - * @param a address of the first vector - * @param b address of the second vector - * @param length the vector dimensions - */ - static int dotProduct(MemorySegment a, MemorySegment b, int length) { - assert length >= 0; - if (a.byteSize() != b.byteSize()) { - throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); - } - if (length > a.byteSize()) { - throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); - } - int i = 0; - int res = 0; - if (length >= DOT_STRIDE) { - i += length & ~(DOT_STRIDE - 1); - res = dot8s(a, b, i); - } + // Stride of the native implementation - consumes this number of bytes per loop invocation. 
+ // There must be at least this number of bytes/elements available when going native + static final int DOT_STRIDE = 32; + static final int SQR_STRIDE = 16; - // tail - for (; i < length; i++) { - res += a.get(JAVA_BYTE, i) * b.get(JAVA_BYTE, i); + static { + assert DOT_STRIDE > 0 && (DOT_STRIDE & (DOT_STRIDE - 1)) == 0 : "Not a power of two"; + assert dot8Stride() == DOT_STRIDE : dot8Stride() + " != " + DOT_STRIDE; + assert SQR_STRIDE > 0 && (SQR_STRIDE & (SQR_STRIDE - 1)) == 0 : "Not a power of two"; + assert sqr8Stride() == SQR_STRIDE : sqr8Stride() + " != " + SQR_STRIDE; } - assert i == length; - return res; - } - /** - * Computes the square distance of given byte vectors. - * @param a address of the first vector - * @param b address of the second vector - * @param length the vector dimensions - */ - static int squareDistance(MemorySegment a, MemorySegment b, int length) { - assert length >= 0; - if (a.byteSize() != b.byteSize()) { - throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); - } - if (length > a.byteSize()) { - throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); - } - int i = 0; - int res = 0; - if (length >= SQR_STRIDE) { - i += length & ~(SQR_STRIDE - 1); - res = sqr8s(a, b, i); + /** + * Computes the dot product of given byte vectors. 
+ * + * @param a address of the first vector + * @param b address of the second vector + * @param length the vector dimensions + */ + static int dotProduct(MemorySegment a, MemorySegment b, int length) { + assert length >= 0; + if (a.byteSize() != b.byteSize()) { + throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); + } + if (length > a.byteSize()) { + throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); + } + int i = 0; + int res = 0; + if (length >= DOT_STRIDE) { + i += length & ~(DOT_STRIDE - 1); + res = dot8s(a, b, i); + } + + // tail + for (; i < length; i++) { + res += a.get(JAVA_BYTE, i) * b.get(JAVA_BYTE, i); + } + assert i == length; + return res; } - // tail - for (; i < length; i++) { - int dist = a.get(JAVA_BYTE, i) - b.get(JAVA_BYTE, i); - res += dist * dist; + /** + * Computes the square distance of given byte vectors. + * + * @param a address of the first vector + * @param b address of the second vector + * @param length the vector dimensions + */ + static int squareDistance(MemorySegment a, MemorySegment b, int length) { + assert length >= 0; + if (a.byteSize() != b.byteSize()) { + throw new IllegalArgumentException("dimensions differ: " + a.byteSize() + "!=" + b.byteSize()); + } + if (length > a.byteSize()) { + throw new IllegalArgumentException("length: " + length + ", greater than vector dimensions: " + a.byteSize()); + } + int i = 0; + int res = 0; + if (length >= SQR_STRIDE) { + i += length & ~(SQR_STRIDE - 1); + res = sqr8s(a, b, i); + } + + // tail + for (; i < length; i++) { + int dist = a.get(JAVA_BYTE, i) - b.get(JAVA_BYTE, i); + res += dist * dist; + } + assert i == length; + return res; } - assert i == length; - return res; - } - private static int dot8Stride() { - try { - return (int) dot8stride$mh.invokeExact(); - } catch (Throwable t) { - throw new AssertionError(t); + private static int dot8Stride() { + try { + return (int) 
dot8stride$mh.invokeExact(); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int sqr8Stride() { - try { - return (int) sqr8stride$mh.invokeExact(); - } catch (Throwable t) { - throw new AssertionError(t); + private static int sqr8Stride() { + try { + return (int) sqr8stride$mh.invokeExact(); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int dot8s(MemorySegment a, MemorySegment b, int length) { - try { - return (int) dot8s$mh.invokeExact(a, b, length); - } catch (Throwable t) { - throw new AssertionError(t); + private static int dot8s(MemorySegment a, MemorySegment b, int length) { + try { + return (int) dot8s$mh.invokeExact(a, b, length); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - private static int sqr8s(MemorySegment a, MemorySegment b, int length) { - try { - return (int) sqr8s$mh.invokeExact(a, b, length); - } catch (Throwable t) { - throw new AssertionError(t); + private static int sqr8s(MemorySegment a, MemorySegment b, int length) { + try { + return (int) sqr8s$mh.invokeExact(a, b, length); + } catch (Throwable t) { + throw new AssertionError(t); + } } - } - - static final MethodHandle DOT_HANDLE; - static final MethodHandle SQR_HANDLE; - static { - try { - var lookup = MethodHandles.lookup(); - var mt = MethodType.methodType(int.class, MemorySegment.class, MemorySegment.class, int.class); - DOT_HANDLE = lookup.findStatic(JdkVectorLibrary.class, "dotProduct", mt); - SQR_HANDLE = lookup.findStatic(JdkVectorLibrary.class, "squareDistance", mt); - } catch (NoSuchMethodException | IllegalAccessException e) { - throw new RuntimeException(e); + static final MethodHandle DOT_HANDLE; + static final MethodHandle SQR_HANDLE; + + static { + try { + var lookup = MethodHandles.lookup(); + var mt = MethodType.methodType(int.class, MemorySegment.class, MemorySegment.class, int.class); + DOT_HANDLE = lookup.findStatic(JdkVectorSimilarityFunctions.class, "dotProduct", mt); + 
SQR_HANDLE = lookup.findStatic(JdkVectorSimilarityFunctions.class, "squareDistance", mt); + } catch (NoSuchMethodException | IllegalAccessException e) { + throw new RuntimeException(e); + } } - } - @Override - public MethodHandle dotProductHandle() { - return DOT_HANDLE; - } + @Override + public MethodHandle dotProductHandle() { + return DOT_HANDLE; + } - @Override - public MethodHandle squareDistanceHandle() { - return SQR_HANDLE; + @Override + public MethodHandle squareDistanceHandle() { + return SQR_HANDLE; + } } } diff --git a/libs/vec/native/publish_vec_binaries.sh b/libs/vec/native/publish_vec_binaries.sh index 6cdea109c2eb7..7c460eb0321c9 100755 --- a/libs/vec/native/publish_vec_binaries.sh +++ b/libs/vec/native/publish_vec_binaries.sh @@ -19,7 +19,7 @@ if [ -z "$ARTIFACTORY_API_KEY" ]; then exit 1; fi -VERSION="1.0.1" +VERSION="1.0.3" ARTIFACTORY_REPOSITORY="${ARTIFACTORY_REPOSITORY:-https://artifactory.elastic.dev/artifactory/elasticsearch-native/}" TEMP=$(mktemp -d) diff --git a/libs/vec/native/src/vec/c/vec.c b/libs/vec/native/src/vec/c/vec.c index 008129b665d01..46cc6722d01d0 100644 --- a/libs/vec/native/src/vec/c/vec.c +++ b/libs/vec/native/src/vec/c/vec.c @@ -18,6 +18,34 @@ #define SQR8S_STRIDE_BYTES_LEN 16 #endif +#ifdef __linux__ + #include + #include + #ifndef HWCAP_NEON + #define HWCAP_NEON 0x1000 + #endif +#endif + +#ifdef __APPLE__ +#include +#endif + +EXPORT int vec_caps() { +#ifdef __APPLE__ + #ifdef TARGET_OS_OSX + // All M series Apple silicon support Neon instructions + return 1; + #else + #error "Unsupported Apple platform" + #endif +#elif __linux__ + int hwcap = getauxval(AT_HWCAP); + return (hwcap & HWCAP_NEON) != 0; +#else + #error "Unsupported aarch64 platform" +#endif +} + EXPORT int dot8s_stride() { return DOT8_STRIDE_BYTES_LEN; } diff --git a/libs/vec/native/src/vec/headers/vec.h b/libs/vec/native/src/vec/headers/vec.h index a717ad2712e1c..380111107f383 100644 --- a/libs/vec/native/src/vec/headers/vec.h +++ 
b/libs/vec/native/src/vec/headers/vec.h @@ -6,7 +6,15 @@ * Side Public License, v 1. */ +#ifdef _MSC_VER +#define EXPORT extern "C" __declspec(dllexport) +#elif defined(__GNUC__) && !defined(__clang__) #define EXPORT __attribute__((externally_visible,visibility("default"))) +#elif __clang__ +#define EXPORT __attribute__((visibility("default"))) +#endif + +EXPORT int vec_caps(); EXPORT int dot8s_stride(); From eedeb4ba47ff6c35a2998dd817e30a2116f9a126 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 29 Apr 2024 12:49:15 +0200 Subject: [PATCH 014/244] Increase size of big arrays only when there is an actual value in the aggregators (Analytics module) (#107813) Similar to #107764 but in the analytics module. --- docs/changelog/107813.yaml | 6 + .../analytics/boxplot/BoxplotAggregator.java | 2 +- .../rate/HistogramRateAggregator.java | 7 +- .../analytics/rate/NumericRateAggregator.java | 6 +- .../stringstats/StringStatsAggregator.java | 21 ++- .../ttest/UnpairedTTestAggregator.java | 5 +- .../test/analytics/empty_field_metric.yml | 135 ++++++++++++++++++ 7 files changed, 161 insertions(+), 21 deletions(-) create mode 100644 docs/changelog/107813.yaml create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml diff --git a/docs/changelog/107813.yaml b/docs/changelog/107813.yaml new file mode 100644 index 0000000000000..1cbb518a8be5b --- /dev/null +++ b/docs/changelog/107813.yaml @@ -0,0 +1,6 @@ +pr: 107813 +summary: Increase size of big arrays only when there is an actual value in the aggregators + (Analytics module) +area: Aggregations +type: enhancement +issues: [] diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java index 223a36098d6b1..73ed4d90ce431 100644 --- 
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregator.java @@ -72,8 +72,8 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - TDigestState state = getExistingOrNewHistogram(bigArrays(), bucket); if (values.advanceExact(doc)) { + TDigestState state = getExistingOrNewHistogram(bigArrays(), bucket); final HistogramValue sketch = values.histogram(); while (sketch.next()) { state.add(sketch.value(), sketch.count()); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java index c0ef9b4f7374b..5499ec8fa8c02 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/HistogramRateAggregator.java @@ -42,10 +42,11 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - sums = bigArrays().grow(sums, bucket + 1); - compensations = bigArrays().grow(compensations, bucket + 1); - if (values.advanceExact(doc)) { + + sums = bigArrays().grow(sums, bucket + 1); + compensations = bigArrays().grow(compensations, bucket + 1); + final HistogramValue sketch = values.histogram(); while (sketch.next()) { double sum = sums.get(bucket); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java 
b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java index 964dac08c097e..b8a4e20111365 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/NumericRateAggregator.java @@ -70,10 +70,10 @@ public void collect(int doc, long bucket) throws IOException { return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - sums = bigArrays().grow(sums, bucket + 1); - compensations = bigArrays().grow(compensations, bucket + 1); - if (values.advanceExact(doc)) { + sums = bigArrays().grow(sums, bucket + 1); + compensations = bigArrays().grow(compensations, bucket + 1); + final int valuesCount = values.docValueCount(); // Compute the sum of double values with Kahan summation algorithm which is more // accurate than naive summation. diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java index 8fd75dc4c2e35..2d39e47e7d1c7 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregator.java @@ -89,18 +89,17 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long bucket) throws IOException { - final long overSize = BigArrays.overSize(bucket + 1); - if (bucket >= count.size()) { - final long from = count.size(); - count = bigArrays().resize(count, overSize); - totalLength = bigArrays().resize(totalLength, overSize); - minLength = bigArrays().resize(minLength, 
overSize); - maxLength = bigArrays().resize(maxLength, overSize); - minLength.fill(from, overSize, Integer.MAX_VALUE); - maxLength.fill(from, overSize, Integer.MIN_VALUE); - } - if (values.advanceExact(doc)) { + final long overSize = BigArrays.overSize(bucket + 1); + if (bucket >= count.size()) { + final long from = count.size(); + count = bigArrays().resize(count, overSize); + totalLength = bigArrays().resize(totalLength, overSize); + minLength = bigArrays().resize(minLength, overSize); + maxLength = bigArrays().resize(maxLength, overSize); + minLength.fill(from, overSize, Integer.MAX_VALUE); + maxLength.fill(from, overSize, Integer.MIN_VALUE); + } final int valuesCount = values.docValueCount(); count.increment(bucket, valuesCount); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java index 0f74e3466dd0a..004637a7df7f9 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/UnpairedTTestAggregator.java @@ -86,7 +86,7 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, return new LeafBucketCollectorBase(sub, docAValues) { - private static void processValues( + private void processValues( int doc, long bucket, SortedNumericDoubleValues docValues, @@ -95,6 +95,7 @@ private static void processValues( TTestStatsBuilder builder ) throws IOException { if (docValues.advanceExact(doc)) { + builder.grow(bigArrays(), bucket + 1); final int numValues = docValues.docValueCount(); for (int i = 0; i < numValues; i++) { builder.addValue(compSum, compSumOfSqr, bucket, docValues.nextValue()); @@ -105,12 +106,10 @@ private static void processValues( @Override public void collect(int doc, long bucket) throws IOException { if (bitsA == 
null || bitsA.get(doc)) { - a.grow(bigArrays(), bucket + 1); processValues(doc, bucket, docAValues, compSumA, compSumOfSqrA, a); } if (bitsB == null || bitsB.get(doc)) { processValues(doc, bucket, docBValues, compSumB, compSumOfSqrB, b); - b.grow(bigArrays(), bucket + 1); } } }; diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml new file mode 100644 index 0000000000000..891e02bc2dcf5 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/analytics/empty_field_metric.yml @@ -0,0 +1,135 @@ +setup: + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + terms_field: + type: keyword + date_field: + type: date + int_field: + type : integer + double_field: + type : double + string_field: + type: keyword + histogram_field: + type: histogram + + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: "1" + - terms_field: foo + date_field: 2024-01-02 + - index: + _index: test_1 + _id: "2" + - terms_field: foo + date_field: 2024-01-02 + - index: + _index: test_1 + _id: "3" + - terms_field: bar + date_field: 2024-01-01 + +--- +"Basic test": + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + aggs: + the_terms: + terms: + field: terms_field + "order": + "_key": "asc" + aggs: + boxplot_agg: + boxplot: + field: double_field + t_test_agg: + t_test: + a: + field: double_field + b: + field: int_field + type: paired + string_stats_agg: + string_stats: + field: string_field + + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { aggregations.the_terms.buckets.0.key: bar} + - match: { aggregations.the_terms.buckets.0.doc_count: 1} + - exists: aggregations.the_terms.buckets.0.boxplot_agg.min + - exists: aggregations.the_terms.buckets.0.boxplot_agg.max + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q3 + - exists: 
aggregations.the_terms.buckets.0.boxplot_agg.q1 + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q2 + - exists: aggregations.the_terms.buckets.0.boxplot_agg.q3 + - match: { aggregations.the_terms.buckets.0.t_test_agg.value: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.count: 0 } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.min_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.max_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.avg_length: null } + - match: { aggregations.the_terms.buckets.0.string_stats_agg.entropy: 0 } + - match: { aggregations.the_terms.buckets.1.key: foo} + - match: { aggregations.the_terms.buckets.1.doc_count: 2} + - exists: aggregations.the_terms.buckets.1.boxplot_agg.min + - exists: aggregations.the_terms.buckets.1.boxplot_agg.max + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q3 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q1 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q2 + - exists: aggregations.the_terms.buckets.1.boxplot_agg.q3 + - match: { aggregations.the_terms.buckets.1.t_test_agg.value: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.count: 0 } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.min_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.max_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.avg_length: null } + - match: { aggregations.the_terms.buckets.1.string_stats_agg.entropy: 0 } + +--- +"Rate test": + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + aggs: + the_date_hist: + date_histogram: + field: date_field + calendar_interval: day + format: yyyy-MM-dd + aggs: + rate_agg: + rate: + field: double_field + rate_hist_agg: + rate: + field: histogram_field + + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + - match: { 
aggregations.the_date_hist.buckets.0.key_as_string: 2024-01-01 } + - match: { aggregations.the_date_hist.buckets.0.doc_count: 1 } + - match: { aggregations.the_date_hist.buckets.0.rate_agg.value: 0.0 } + - match: { aggregations.the_date_hist.buckets.0.rate_hist_agg.value: 0.0 } + - match: { aggregations.the_date_hist.buckets.1.key_as_string: 2024-01-02 } + - match: { aggregations.the_date_hist.buckets.1.doc_count: 2 } + - match: { aggregations.the_date_hist.buckets.1.rate_agg.value: 0.0 } + - match: { aggregations.the_date_hist.buckets.1.rate_hist_agg.value: 0.0 } + From 6409f7e99625384152c19ad8d4fe0203fe4e52de Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Mon, 29 Apr 2024 12:13:51 +0100 Subject: [PATCH 015/244] Update min CCS version for 8.14 release (#107939) --- .../org/elasticsearch/TransportVersions.java | 2 +- .../TransportResolveClusterActionTests.java | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 0c9c47eb589a2..e05bf3ffeda0f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -248,7 +248,7 @@ static TransportVersion def(int id) { * Reference to the minimum transport version that can be used with CCS. * This should be the transport version used by the previous minor release. 
*/ - public static final TransportVersion MINIMUM_CCS_VERSION = V_8_12_0; + public static final TransportVersion MINIMUM_CCS_VERSION = V_8_13_0; static final NavigableMap VERSION_IDS = getAllVersionIds(TransportVersions.class); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java index 2604461d12466..2a64fbad97575 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveClusterActionTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; -import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesActionTests; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; @@ -50,27 +49,29 @@ public void tearDown() throws Exception { public void testCCSCompatibilityCheck() { Settings settings = Settings.builder() - .put("node.name", TransportFieldCapabilitiesActionTests.class.getSimpleName()) + .put("node.name", TransportResolveClusterActionTests.class.getSimpleName()) .put(SearchService.CCS_VERSION_CHECK_SETTING.getKey(), "true") .build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); + TransportVersion nextTransportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, VersionInformation.CURRENT, - transportVersion, + nextTransportVersion, 
threadPool ); ResolveClusterActionRequest request = new ResolveClusterActionRequest(new String[] { "test" }) { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getTransportVersion().before(transportVersion)) { - throw new IllegalArgumentException("This request isn't serializable before transport version " + transportVersion); - } + throw new UnsupportedOperationException( + "ResolveClusterAction requires at least Transport Version " + + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion() + + " but was " + + out.getTransportVersion().toReleaseVersion() + ); } }; ClusterService clusterService = new ClusterService( From 98dcd565ebcc59405f31630d9a252891768dee95 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 29 Apr 2024 13:23:57 +0200 Subject: [PATCH 016/244] Relax assertion on `progress` value being updated to `end` (#107356) This change removes an assertion in ProgressListenableActionFuture that checks if the progress value has been updated up to the end of the completed range. But ranges may or may not be fully written before being marked as completed (the remaining bytes can just be padding bytes), so is no point on advancing the progress up to the end of the range before range completion. 
--- .../blobcache/common/ProgressListenableActionFuture.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java index 7b8621f8821a6..0912da200735e 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/ProgressListenableActionFuture.java @@ -136,7 +136,7 @@ protected void done(boolean success) { super.done(success); final List>> listenersToExecute; synchronized (this) { - assert progress == end || success == false; + assert completed == false; completed = true; listenersToExecute = this.listeners; listeners = null; From e48457d4379f5cf2fb15f17e9b0ccf65a4b45084 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Mon, 29 Apr 2024 13:24:31 +0200 Subject: [PATCH 017/244] Fix failing mixed cluster test (#107993) --- .../xpack/security/support/SecurityIndexManager.java | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 773573d02e45a..95574c317495a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -214,13 +214,9 @@ public void removeStateListener(BiConsumer listener) { * Get the minimum security index mapping version in the cluster */ private SystemIndexDescriptor.MappingsVersion 
getMinSecurityIndexMappingVersion(ClusterState clusterState) { - var minClusterVersion = clusterState.getMinSystemIndexMappingVersions().get(systemIndexDescriptor.getPrimaryIndex()); - // Can be null in mixed clusters. This indicates that the cluster state and index needs to be updated with the latest mapping - // version from the index descriptor - if (minClusterVersion == null) { - return systemIndexDescriptor.getMappingsVersion(); - } - return minClusterVersion; + SystemIndexDescriptor.MappingsVersion mappingsVersion = clusterState.getMinSystemIndexMappingVersions() + .get(systemIndexDescriptor.getPrimaryIndex()); + return mappingsVersion == null ? new SystemIndexDescriptor.MappingsVersion(1, 0) : mappingsVersion; } @Override From 35600c53b947841eeabae9fe2ca31cb74c3fb95c Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Apr 2024 12:31:39 +0100 Subject: [PATCH 018/244] Encapsulate IndexShard/IndexService closing in tests (#107974) Closing an `IndexShard` or an `IndexService` requires some parameters that are not really important in tests. This commit encapsulates these calls into utility methods to populate those parameters with sensible values. Moreover it reduces some unnecessary noise in a follow-up PR which will make this closing process a little more complicated in production code without affecting all these tests. 
Relates #107952 --- .../diskusage/IndexDiskUsageAnalyzerIT.java | 3 +- .../shard/GlobalCheckpointListenersIT.java | 3 +- .../index/shard/IndexShardIT.java | 5 ++-- .../search/fieldcaps/FieldCapabilitiesIT.java | 3 +- .../elasticsearch/index/IndexModuleTests.java | 30 ++++++++++--------- .../index/IndexServiceTests.java | 10 +++++-- .../index/engine/NoOpEngineRecoveryTests.java | 2 +- .../RecoveryDuringReplicationTests.java | 10 +++---- .../index/shard/IndexShardTests.java | 12 ++++---- .../IndexingMemoryControllerTests.java | 2 +- .../PeerRecoveryTargetServiceTests.java | 2 +- .../indices/recovery/RecoveryStatusTests.java | 4 ++- .../indices/recovery/RecoveryTests.java | 6 ++-- .../BlobStoreRepositoryRestoreTests.java | 4 +-- .../index/shard/IndexShardTestCase.java | 23 +++++++++++++- .../ShardFollowTaskReplicationTests.java | 2 +- .../engine/frozen/FrozenIndexShardTests.java | 2 +- 17 files changed, 78 insertions(+), 45 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index 235d1592cf7c7..6f6db1d2d3d82 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -39,6 +39,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.IntStream; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; @@ -260,7 +261,7 @@ public void testFailingTargetShards() throws Exception { IndexShard indexShard = indicesService.getShardOrNull(shardId); assertNotNull("No shard found for shard " + 
shardId, indexShard); logger.info("--> failing shard {} on node {}", shardRequest.shardId(), node); - indexShard.close("test", randomBoolean()); + closeShardNoCheck(indexShard, randomBoolean()); failedShards.incrementAndGet(); } else { successfulShards.incrementAndGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java index b38198a98b5a5..256bdd45afbf7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/GlobalCheckpointListenersIT.java @@ -25,6 +25,7 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -101,7 +102,7 @@ public void accept(final long g, final Exception e) { } }, null); - shard.close("closed", randomBoolean()); + closeShardNoCheck(shard, randomBoolean()); assertBusy(() -> assertTrue(invoked.get())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index ec9373120f491..c01d945ca2a1a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -93,6 +93,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; import static 
org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBuilder; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog; import static org.elasticsearch.index.shard.IndexShardTestCase.recoverFromStore; import static org.elasticsearch.test.LambdaMatchers.falseWith; @@ -545,7 +546,7 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { prepareIndex("test").setId("1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); CheckedFunction wrapper = directoryReader -> directoryReader; - shard.close("simon says", false); + closeShardNoCheck(shard); AtomicReference shardRef = new AtomicReference<>(); List failures = new ArrayList<>(); IndexingOperationListener listener = new IndexingOperationListener() { @@ -583,7 +584,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul try { ExceptionsHelper.rethrowAndSuppress(failures); } finally { - newShard.close("just do it", randomBoolean()); + closeShardNoCheck(newShard, randomBoolean()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 64f04d46a9a90..4446338c4ff2a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -83,6 +83,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.aMapWithSize; import static 
org.hamcrest.Matchers.array; @@ -526,7 +527,7 @@ private void moveOrCloseShardsOnNodes(String nodeName) throws Exception { for (IndexService indexService : indicesService) { for (IndexShard indexShard : indexService) { if (randomBoolean()) { - indexShard.close("test", randomBoolean()); + closeShardNoCheck(indexShard, randomBoolean()); } else if (randomBoolean()) { final ShardId shardId = indexShard.shardId(); final String[] nodeNames = internalCluster().getNodeNames(); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 4e6f702b67252..977ab9bcedd75 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -113,6 +113,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; +import static org.elasticsearch.index.IndexServiceTests.closeIndexService; +import static org.elasticsearch.index.shard.IndexShardTestCase.flushAndCloseShardNoCheck; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -240,7 +242,7 @@ public void testWrapperIsBound() throws IOException { IndexService indexService = newIndexService(module); assertTrue(indexService.getReaderWrapper() instanceof Wrapper); assertSame(indexService.getEngineFactory(), module.getEngineFactory()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testRegisterIndexStore() throws IOException { @@ -265,7 +267,7 @@ public void testRegisterIndexStore() throws IOException { final IndexService indexService = newIndexService(module); assertThat(indexService.getDirectoryFactory(), instanceOf(FooFunction.class)); - indexService.close("simon says", false); + 
closeIndexService(indexService); } public void testDirectoryWrapper() throws IOException { @@ -311,7 +313,7 @@ public void testDirectoryWrapper() throws IOException { assertThat(((WrappedDirectory) directory).shardRouting, sameInstance(shardRouting)); assertThat(directory, instanceOf(FilterDirectory.class)); - indexService.close("test done", false); + closeIndexService(indexService); } public void testOtherServiceBound() throws IOException { @@ -331,7 +333,7 @@ public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason rea assertEquals(x.getIndex(), index); indexService.getIndexEventListener().beforeIndexRemoved(null, null); assertTrue(atomicBoolean.get()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testListener() throws IOException { @@ -352,7 +354,7 @@ public void testListener() throws IOException { IndexService indexService = newIndexService(module); assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey())); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddIndexOperationListener() throws IOException { @@ -383,7 +385,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { l.preIndex(shardId, index); } assertTrue(executed.get()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddSearchOperationListener() throws IOException { @@ -409,7 +411,7 @@ public void onNewReaderContext(ReaderContext readerContext) { l.onNewReaderContext(mock(ReaderContext.class)); } assertTrue(executed.get()); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testAddSimilarity() throws IOException { @@ -435,7 +437,7 @@ public void testAddSimilarity() throws IOException { assertThat(similarity, Matchers.instanceOf(TestSimilarity.class)); assertEquals("my_similarity", 
similarityService.getSimilarity("my_similarity").name()); assertEquals("there is a key", ((TestSimilarity) similarity).key); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testFrozen() { @@ -496,7 +498,7 @@ public void testForceCustomQueryCache() throws IOException { ); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof CustomQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); assertThat(liveQueryCaches, empty()); } @@ -509,7 +511,7 @@ public void testDefaultQueryCacheImplIsSelected() throws IOException { IndexModule module = createIndexModule(indexSettings, emptyAnalysisRegistry, indexNameExpressionResolver); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof IndexQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOException { @@ -523,7 +525,7 @@ public void testDisableQueryCacheHasPrecedenceOverForceQueryCache() throws IOExc module.forceQueryCacheProvider((a, b) -> new CustomQueryCache(null)); IndexService indexService = newIndexService(module); assertTrue(indexService.cache().query() instanceof DisabledQueryCache); - indexService.close("simon says", false); + closeIndexService(indexService); } public void testCustomQueryCacheCleanedUpIfIndexServiceCreationFails() { @@ -644,7 +646,7 @@ public void testRegisterCustomRecoveryStateFactory() throws IOException { assertThat(indexService.createRecoveryState(shard, mock(DiscoveryNode.class), mock(DiscoveryNode.class)), is(recoveryState)); - indexService.close("closing", false); + closeIndexService(indexService); } public void testIndexCommitListenerIsBound() throws IOException, ExecutionException, InterruptedException { @@ -694,10 +696,10 @@ public void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit) { 
).initialize("_node_id", null, -1); IndexService indexService = newIndexService(module); - closeables.add(() -> indexService.close("close index service at end of test", false)); + closeables.add(() -> closeIndexService(indexService)); IndexShard indexShard = indexService.createShard(shardRouting, IndexShardTestCase.NOOP_GCP_SYNCER, RetentionLeaseSyncer.EMPTY); - closeables.add(() -> indexShard.close("close shard at end of test", true)); + closeables.add(() -> flushAndCloseShardNoCheck(indexShard)); indexShard.markAsRecovering("test", new RecoveryState(shardRouting, DiscoveryNodeUtils.create("_node_id", "_node_id"), null)); final PlainActionFuture recoveryFuture = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index d2304908a933b..06aa88e2de4a2 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -145,7 +145,7 @@ protected void runInternal() {} assertFalse(task.isClosed()); assertTrue(task.isScheduled()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse("no shards left", task.mustReschedule()); assertTrue(task.isScheduled()); task.close(); @@ -222,7 +222,7 @@ public void testRefreshTaskIsUpdated() throws Exception { assertTrue(refreshTask.isScheduled()); assertFalse(refreshTask.isClosed()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse(refreshTask.isScheduled()); assertTrue(refreshTask.isClosed()); } @@ -260,7 +260,7 @@ public void testFsyncTaskIsRunning() throws Exception { assertTrue(fsyncTask.isScheduled()); assertFalse(fsyncTask.isClosed()); - indexService.close("simon says", false); + closeIndexService(indexService); assertFalse(fsyncTask.isScheduled()); assertTrue(fsyncTask.isClosed()); @@ -459,4 +459,8 @@ public void 
testUpdateSyncIntervalDynamically() { indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertEquals("20s", indexMetadata.getSettings().get(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey())); } + + public static void closeIndexService(IndexService indexService) throws IOException { + indexService.close("IndexServiceTests#closeIndexService", false); + } } diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java index 0a7c4fa866008..cd8539e4c1b6a 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineRecoveryTests.java @@ -26,7 +26,7 @@ public void testRecoverFromNoOp() throws IOException { for (int i = 0; i < nbDocs; i++) { indexDoc(indexShard, "_doc", String.valueOf(i)); } - indexShard.close("test", true); + flushAndCloseShardNoCheck(indexShard); final ShardRouting shardRouting = indexShard.routingEntry(); IndexShard primary = reinitShard( diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 798f52cfbdc19..330571d53f29a 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -134,10 +134,10 @@ public void testRecoveryToReplicaThatReceivedExtraDocument() throws Exception { new SourceToParse("replica", new BytesArray("{}"), XContentType.JSON) ); shards.promoteReplicaToPrimary(promotedReplica).get(); - oldPrimary.close("demoted", randomBoolean()); + closeShardNoCheck(oldPrimary, randomBoolean()); oldPrimary.store().close(); shards.removeReplica(remainingReplica); - 
remainingReplica.close("disconnected", false); + closeShardNoCheck(remainingReplica); remainingReplica.store().close(); // randomly introduce a conflicting document final boolean extra = randomBoolean(); @@ -260,7 +260,7 @@ public void testRecoveryAfterPrimaryPromotion() throws Exception { newPrimary.flush(new FlushRequest()); } - oldPrimary.close("demoted", false); + closeShardNoCheck(oldPrimary); oldPrimary.store().close(); IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); @@ -306,7 +306,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { shards.promoteReplicaToPrimary(newPrimary).get(); // Recover a replica should rollback the stale documents shards.removeReplica(replica); - replica.close("recover replica - first time", false); + closeShardNoCheck(replica); replica.store().close(); replica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(replica); @@ -317,7 +317,7 @@ public void testReplicaRollbackStaleDocumentsInPeerRecovery() throws Exception { assertThat(replica.getLastSyncedGlobalCheckpoint(), equalTo(replica.seqNoStats().getMaxSeqNo())); // Recover a replica again should also rollback the stale documents. 
shards.removeReplica(replica); - replica.close("recover replica - second time", false); + closeShardNoCheck(replica); replica.store().close(); IndexShard anotherReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(anotherReplica); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index c9f8372f54793..4577777d139cd 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -276,7 +276,7 @@ public void testFailShard() throws Exception { assertNotNull(shardPath); // fail shard shard.failShard("test shard fail", new CorruptIndexException("", "")); - shard.close("do not assert history", false); + closeShardNoCheck(shard); shard.store().close(); // check state file still exists ShardStateMetadata shardStateMetadata = load(logger, shardPath.getShardStatePath()); @@ -1477,7 +1477,7 @@ public void testSnapshotStore() throws IOException { snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); - newShard.close("test", false); + closeShardNoCheck(newShard); snapshot = newShard.snapshotStoreMetadata(); assertThat(snapshot.getSegmentsFile().name(), equalTo("segments_3")); @@ -1766,7 +1766,7 @@ public void testIndexingOperationsListeners() throws IOException { AtomicInteger preDelete = new AtomicInteger(); AtomicInteger postDelete = new AtomicInteger(); AtomicInteger postDeleteException = new AtomicInteger(); - shard.close("simon says", true); + flushAndCloseShardNoCheck(shard); shard = reinitShard(shard, new IndexingOperationListener() { @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { @@ -1848,7 +1848,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { assertEquals(1, 
postDelete.get()); assertEquals(0, postDeleteException.get()); - shard.close("Unexpected close", true); + closeShardNoCheck(shard); shard.state = IndexShardState.STARTED; // It will generate exception try { @@ -4372,7 +4372,7 @@ public void recoverFromTranslog( Thread closeShardThread = new Thread(() -> { try { safeAwait(readyToCloseLatch); - shard.close("testing", false); + closeShardNoCheck(shard); // in integration tests, this is done as a listener on IndexService. MockFSDirectoryFactory.checkIndex(logger, shard.store(), shard.shardId); } catch (IOException e) { @@ -4813,7 +4813,7 @@ public void testCloseShardWhileEngineIsWarming() throws Exception { recoveryThread.start(); try { warmerStarted.await(); - shard.close("testing", false); + closeShardNoCheck(shard); assertThat(shard.state, equalTo(IndexShardState.CLOSED)); } finally { warmerBlocking.countDown(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 7535f900ff2d1..4c6d6f563b950 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -363,7 +363,7 @@ public void testTranslogRecoveryWorksWithIMC() throws IOException { for (int i = 0; i < 100; i++) { indexDoc(shard, Integer.toString(i), "{\"foo\" : \"bar\"}", XContentType.JSON, null); } - shard.close("simon says", false); + closeShardNoCheck(shard); AtomicReference shardRef = new AtomicReference<>(); Settings settings = Settings.builder().put("indices.memory.index_buffer_size", "50kb").build(); Iterable iterable = () -> (shardRef.get() == null) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index d27e924110c15..7a31d725e6340 100644 --- 
a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -372,7 +372,7 @@ public void testResetStartRequestIfTranslogIsCorrupted() throws Exception { DiscoveryNode rNode = DiscoveryNodeUtils.builder("foo").roles(Collections.emptySet()).build(); IndexShard shard = newStartedShard(false); final SeqNoStats seqNoStats = populateRandomData(shard); - shard.close("test", false); + closeShardNoCheck(shard); if (randomBoolean()) { shard.store().associateIndexWithNewTranslog(UUIDs.randomBase64UUID()); } else if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index e53019fd93506..ef8af18322f8d 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -22,6 +22,8 @@ import java.util.Set; import java.util.regex.Pattern; +import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck; + public class RecoveryStatusTests extends ESSingleNodeTestCase { private static final Version MIN_SUPPORTED_LUCENE_VERSION = IndexVersions.MINIMUM_COMPATIBLE.luceneVersion(); @@ -71,7 +73,7 @@ public void testRenameTempFiles() throws IOException { } } assertNotNull(expectedFile); - indexShard.close("foo", false);// we have to close it here otherwise rename fails since the write.lock is held by the engine + closeShardNoCheck(indexShard); // we have to close it here otherwise rename fails since the write.lock is held by the engine multiFileWriter.renameAllTempFiles(); strings = Sets.newHashSet(indexShard.store().directory().listAll()); assertTrue(strings.toString(), strings.contains("foo.bar")); diff --git 
a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 20e85c9c6fed8..9590d83c87263 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -231,7 +231,7 @@ public void testDifferentHistoryUUIDDisablesOPsRecovery() throws Exception { final String historyUUID = replica.getHistoryUUID(); Translog.TranslogGeneration translogGeneration = getTranslog(replica).getGeneration(); shards.removeReplica(replica); - replica.close("test", false); + closeShardNoCheck(replica); IndexWriterConfig iwc = new IndexWriterConfig(null).setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we stared it up otherwise we would need to wait for it here @@ -355,7 +355,7 @@ public void testSequenceBasedRecoveryKeepsTranslog() throws Exception { if (randomBoolean()) { shards.flush(); } - replica.close("test", randomBoolean()); + closeShardNoCheck(replica, randomBoolean()); replica.store().close(); final IndexShard newReplica = shards.addReplicaWithExistingPath(replica.shardPath(), replica.routingEntry().currentNodeId()); shards.recoverReplica(newReplica); @@ -472,7 +472,7 @@ public void testRecoveryTrimsLocalTranslog() throws Exception { } shards.syncGlobalCheckpoint(); shards.promoteReplicaToPrimary(randomFrom(shards.getReplicas())).get(); - oldPrimary.close("demoted", false); + closeShardNoCheck(oldPrimary); oldPrimary.store().close(); oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); shards.recoverReplica(oldPrimary); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index 
7c0d6af4a92cf..ce732a3b95a34 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -136,7 +136,7 @@ public void testRestoreSnapshotWithExistingFiles() throws IOException { } finally { if (shard != null && shard.state() != IndexShardState.CLOSED) { try { - shard.close("test", false); + closeShardNoCheck(shard); } finally { IOUtils.close(shard.store()); } @@ -205,7 +205,7 @@ public void testSnapshotWithConflictingName() throws Exception { } finally { if (shard != null && shard.state() != IndexShardState.CLOSED) { try { - shard.close("test", false); + closeShardNoCheck(shard); } finally { IOUtils.close(shard.store()); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 84b1f0a85eed2..b662e44c4b8de 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -681,7 +681,7 @@ protected void closeShard(IndexShard shard, boolean assertConsistencyBetweenTran EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber(engine); } } finally { - IOUtils.close(() -> shard.close("test", false), shard.store()); + IOUtils.close(() -> closeShardNoCheck(shard), shard.store()); } } @@ -693,6 +693,27 @@ protected void closeShards(Iterable shards) throws IOException { } } + /** + * Close an {@link IndexShard}, optionally flushing first, without performing the consistency checks that {@link #closeShard} performs. 
+ */ + public static void closeShardNoCheck(IndexShard indexShard, boolean flushEngine) throws IOException { + indexShard.close("IndexShardTestCase#closeShardNoCheck", flushEngine); + } + + /** + * Close an {@link IndexShard} without flushing or performing the consistency checks that {@link #closeShard} performs. + */ + public static void closeShardNoCheck(IndexShard indexShard) throws IOException { + closeShardNoCheck(indexShard, false); + } + + /** + * Flush and close an {@link IndexShard}, without performing the consistency checks that {@link #closeShard} performs. + */ + public static void flushAndCloseShardNoCheck(IndexShard indexShard) throws IOException { + closeShardNoCheck(indexShard, true); + } + protected void recoverShardFromStore(IndexShard primary) throws IOException { primary.markAsRecovering( "store", diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 980710d83d52a..3a16f368d322a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -170,7 +170,7 @@ public void testAddRemoveShardOnLeader() throws Exception { if (leaderGroup.getReplicas().isEmpty() == false && randomInt(100) < 5) { IndexShard closingReplica = randomFrom(leaderGroup.getReplicas()); leaderGroup.removeReplica(closingReplica); - closingReplica.close("test", false); + closeShardNoCheck(closingReplica); closingReplica.store().close(); } else if (leaderGroup.getReplicas().isEmpty() == false && rarely()) { IndexShard newPrimary = randomFrom(leaderGroup.getReplicas()); diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java 
b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java index b3ae0f9707c83..861abaf6f5893 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/frozen/FrozenIndexShardTests.java @@ -27,7 +27,7 @@ public void testRecoverFromFrozenPrimary() throws IOException { indexDoc(indexShard, "_doc", "1"); indexDoc(indexShard, "_doc", "2"); indexDoc(indexShard, "_doc", "3"); - indexShard.close("test", true); + flushAndCloseShardNoCheck(indexShard); final ShardRouting shardRouting = indexShard.routingEntry(); IndexShard frozenShard = reinitShard( indexShard, From 41d2df31f5df10fb7829856cf6f9468068b6bc2a Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Mon, 29 Apr 2024 07:39:43 -0400 Subject: [PATCH 019/244] [Transform] Disable PIT for remote clusters (#107969) PIT searches are still causing large data transfer rates when searching remote clusters. This is not a pure revert. The test code has been updated to match the latest on main. This reverts commit 9b584aa1f2c48ee808b3edce2a9f6c085e714489. 
--- docs/changelog/107969.yaml | 5 ++ .../transforms/ClientTransformIndexer.java | 3 +- .../ClientTransformIndexerTests.java | 64 +++++++++++++++++++ 3 files changed, 71 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/107969.yaml diff --git a/docs/changelog/107969.yaml b/docs/changelog/107969.yaml new file mode 100644 index 0000000000000..ed63513d8d57d --- /dev/null +++ b/docs/changelog/107969.yaml @@ -0,0 +1,5 @@ +pr: 107969 +summary: Disable PIT for remote clusters +area: Transform +type: bug +issues: [] diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 66c618bc07c46..e9e06be2590a2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -473,7 +473,8 @@ private void injectPointInTimeIfNeeded( ActionListener> listener ) { SearchRequest searchRequest = namedSearchRequest.v2(); - if (disablePit || searchRequest.indices().length == 0) { + // We explicitly disable PIT in the presence of remote clusters in the source due to huge PIT handles causing performance problems. 
+ if (disablePit || searchRequest.indices().length == 0 || transformConfig.getSource().requiresRemoteCluster()) { listener.onResponse(namedSearchRequest); return; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 9173e75c4737a..7cb71ec2545d6 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -376,6 +376,70 @@ public void testDisablePit() throws InterruptedException { } } + public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedException { + TransformConfig config = new TransformConfig.Builder(TransformConfigTests.randomTransformConfig()) + // Remote index is configured within source + .setSource(new SourceConfig("remote-cluster:remote-index")) + .build(); + boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; + + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); + MockClientTransformIndexer indexer = new MockClientTransformIndexer( + mock(ThreadPool.class), + mock(ClusterService.class), + mock(IndexNameExpressionResolver.class), + mock(TransformExtension.class), + new TransformServices( + mock(IndexBasedTransformConfigManager.class), + mock(TransformCheckpointService.class), + mock(TransformAuditor.class), + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO), + mock(TransformNode.class) + ), + mock(CheckpointProvider.class), + new AtomicReference<>(IndexerState.STOPPED), + null, + new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456")), + mock(TransformIndexerStats.class), + config, + null, 
+ new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 0L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new TransformCheckpoint( + "transform", + Instant.now().toEpochMilli(), + 2L, + Collections.emptyMap(), + Instant.now().toEpochMilli() + ), + new SeqNoPrimaryTermAndIndex(1, 1, TransformInternalIndexConstants.LATEST_INDEX_NAME), + mock(TransformContext.class), + false + ); + + // Because remote index is configured within source, we expect PIT *not* being used regardless the transform settings + this.assertAsync( + listener -> indexer.doNextSearch(0, listener), + response -> assertNull(response.pointInTimeId()) + ); + + // reverse the setting + indexer.applyNewSettings(new SettingsConfig.Builder().setUsePit(pitEnabled == false).build()); + + // Because remote index is configured within source, we expect PIT *not* being used regardless the transform settings + this.assertAsync( + listener -> indexer.doNextSearch(0, listener), + response -> assertNull(response.pointInTimeId()) + ); + } + } + public void testHandlePitIndexNotFound() throws InterruptedException { // simulate a deleted index due to ILM try (var threadPool = createThreadPool()) { From bca53baa1498ea7818b4fd1eaf5e781036a40915 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 29 Apr 2024 13:42:57 +0200 Subject: [PATCH 020/244] Handle PIT Id as `BytesReference` instead of `String` (#107989) Handling the PIT id as a `BytesReference` instead of as base64 encoded string saves about a third of network traffic for these. We know that PIT ids can be a significant source of traffic so the savings are well worth it. Also, this saves cycles and memory on all nodes involved. A follow-up here would be exploring to slice these IDs out of network buffer instead of copying them to reduce memory usage and large allocations. 
--- .../action/search/CCSPointInTimeIT.java | 11 +++--- .../action/search/PointInTimeIT.java | 31 ++++++++------- .../cluster/routing/ShardRoutingRoleIT.java | 3 +- .../search/basic/QueryRewriteContextIT.java | 7 ++-- .../search/searchafter/SearchAfterIT.java | 3 +- .../search/slice/SearchSliceIT.java | 11 +++++- .../org/elasticsearch/TransportVersions.java | 1 + .../search/AbstractSearchAsyncAction.java | 5 ++- .../search/ClosePointInTimeRequest.java | 22 ++++++----- .../search/OpenPointInTimeResponse.java | 12 +++--- .../action/search/SearchContextId.java | 39 ++++++------------- .../action/search/SearchResponse.java | 19 +++++---- .../search/builder/PointInTimeBuilder.java | 28 +++++++++---- .../search/internal/ShardSearchRequest.java | 2 +- .../action/search/MockSearchPhaseContext.java | 6 ++- .../RestOpenPointInTimeActionTests.java | 3 +- .../action/search/SearchContextIdTests.java | 3 +- .../action/search/SearchRequestTests.java | 17 +++++--- .../search/SearchServiceTests.java | 6 ++- .../builder/PointInTimeBuilderTests.java | 3 +- .../builder/SearchSourceBuilderTests.java | 12 ++++-- .../search/slice/SliceBuilderTests.java | 3 +- .../search/RandomSearchRequestGenerator.java | 7 +++- .../search/SearchResponseUtils.java | 6 ++- .../search/AsyncSearchIntegTestCase.java | 3 +- .../execution/search/PITAwareQueryClient.java | 8 +++- .../execution/sample/CircuitBreakerTests.java | 4 +- .../search/PITAwareQueryClientTests.java | 4 +- .../sequence/CircuitBreakerTests.java | 4 +- .../index/engine/frozen/FrozenIndexIT.java | 13 +++++-- .../index/engine/frozen/FrozenIndexTests.java | 5 ++- .../RetrySearchIntegTests.java | 3 +- .../BlobStoreCacheMaintenanceService.java | 9 +++-- .../DocumentLevelSecurityTests.java | 3 +- .../integration/FieldLevelSecurityTests.java | 5 ++- .../authz/AuthorizationServiceTests.java | 6 ++- .../xpack/sql/execution/search/Querier.java | 6 +-- .../xpack/sql/analysis/CancellationTests.java | 4 +- .../transforms/ClientTransformIndexer.java | 3 
+- .../ClientTransformIndexerTests.java | 36 +++++++++-------- 40 files changed, 228 insertions(+), 148 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index 4bd97f772e4c3..26afe0e52bd02 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; @@ -92,7 +93,7 @@ public void testBasic() { indices.add(randomFrom("*", "local_*", "local_test")); } indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test")); - String pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); try { if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -162,7 +163,7 @@ public void testOpenPITWithIndexFilter() { request.keepAlive(TimeValue.timeValueMinutes(2)); request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-12-15")); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pitId = response.getPointInTimeId(); + BytesReference pitId = response.getPointInTimeId(); if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -252,7 +253,7 @@ public void testFailuresOnOneShardsWithPointInTime() throws ExecutionException, indices.add(randomFrom("*", "local_*", 
"local_test")); } indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test")); - String pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2)); try { if (randomBoolean()) { localClient.prepareIndex("local_test").setId("local_new").setSource().get(); @@ -308,13 +309,13 @@ private static void assertAllSuccessfulShards(SearchResponse.Cluster cluster, in assertFalse(cluster.isTimedOut()); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String readerId) { + private void closePointInTime(BytesReference readerId) { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index 0c1930c0cf925..a9a5bb074c9ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -16,6 +16,8 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import 
org.elasticsearch.core.TimeValue; @@ -82,7 +84,7 @@ public void testBasic() { prepareIndex("test").setId(id).setSource("value", i).get(); } refresh("test"); - String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); @@ -128,7 +130,7 @@ public void testMultipleIndices() { prepareIndex(index).setId(id).setSource("value", i).get(); } refresh(); - String pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); try { int moreDocs = randomIntBetween(10, 50); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { @@ -181,7 +183,7 @@ public void testIndexFilter() { OpenPointInTimeRequest request = new OpenPointInTimeRequest("*").keepAlive(TimeValue.timeValueMinutes(2)); request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-03-01")); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pitId = response.getPointInTimeId(); + BytesReference pitId = response.getPointInTimeId(); try { SearchContextId searchContextId = SearchContextId.decode(writableRegistry(), pitId); String[] actualIndices = searchContextId.getActualIndices(); @@ -210,7 +212,7 @@ public void testRelocation() throws Exception { prepareIndex("test").setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new 
PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); @@ -262,7 +264,7 @@ public void testPointInTimeNotFound() throws Exception { prepareIndex("index").setId(id).setSource("value", i).get(); } refresh(); - String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + BytesReference pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { @@ -303,7 +305,7 @@ public void testIndexNotFound() { prepareIndex("index-2").setId(id).setSource("value", i).get(); } refresh(); - String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse( prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), @@ -333,7 +335,7 @@ public void testIndexNotFound() { public void testAllowNoIndex() { var request = new OpenPointInTimeRequest("my_index").indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) .keepAlive(TimeValue.timeValueMinutes(between(1, 10))); - String pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); + BytesReference pit = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet().getPointInTimeId(); var closeResp = client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pit)).actionGet(); assertThat(closeResp.status(), equalTo(RestStatus.OK)); } @@ -346,7 +348,7 @@ public void testCanMatch() throws Exception { assertAcked(prepareCreate("test").setSettings(settings).setMapping(""" {"properties":{"created_date":{"type": "date", "format": "yyyy-MM-dd"}}}""")); ensureGreen("test"); - String pitId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new 
String[] { "test*" }, TimeValue.timeValueMinutes(2)); try { for (String node : internalCluster().nodesInclude("test")) { for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { @@ -413,7 +415,7 @@ public void testPartialResults() throws Exception { prepareIndex(randomFrom("test-2")).setId(Integer.toString(i)).setSource("value", i).get(); } refresh(); - String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); + BytesReference pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); @@ -445,7 +447,7 @@ public void testPITTiebreak() throws Exception { } } refresh("index-*"); - String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); + BytesReference pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueHours(1)); try { for (int size = 1; size <= numIndex; size++) { SortOrder order = randomBoolean() ? 
SortOrder.ASC : SortOrder.DESC; @@ -476,7 +478,10 @@ public void testPITTiebreak() throws Exception { } public void testCloseInvalidPointInTime() { - expectThrows(Exception.class, client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(""))); + expectThrows( + Exception.class, + client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(BytesArray.EMPTY)) + ); List tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks(); assertThat(tasks, empty()); } @@ -585,13 +590,13 @@ private void assertPagination(PointInTimeBuilder pit, int expectedNumDocs, int s assertThat(seen.size(), equalTo(expectedNumDocs)); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String readerId) { + private void closePointInTime(BytesReference readerId) { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(readerId)).actionGet(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 895a60133251f..006c9e2394f3c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import 
org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -525,7 +526,7 @@ public void testSearchRouting() throws Exception { // do nothing } } - String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); + BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setProfile(true), response -> { var profileResults = response.getProfileResults(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java index 8197b5b8bdd48..753a0a62bce5e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/QueryRewriteContextIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -136,7 +137,7 @@ public void testResolvedIndices_TransportSearchAction() { assertResolvedIndices(prepareSearch("test*"), Set.of("test*"), Set.of(indices), r -> {}); assertResolvedIndices(prepareSearch("alias"), Set.of("alias"), Set.of(indices), r -> {}); - final String pointInTimeId = openPointInTime(indices, TimeValue.timeValueMinutes(2)); + final 
BytesReference pointInTimeId = openPointInTime(indices, TimeValue.timeValueMinutes(2)); try { final PointInTimeBuilder pointInTimeBuilder = new PointInTimeBuilder(pointInTimeId); assertResolvedIndices(prepareSearch().setPointInTime(pointInTimeBuilder), Set.of(indices), Set.of(indices), r -> {}); @@ -190,13 +191,13 @@ public void testResolvedIndices_TransportValidateQueryAction() { ); } - private String openPointInTime(String[] indices, TimeValue keepAlive) { + private BytesReference openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); return response.getPointInTimeId(); } - private void closePointInTime(String pointInTimeId) { + private void closePointInTime(BytesReference pointInTimeId) { ClosePointInTimeResponse response = client().execute( TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index a526e721da1ec..b9c3c27abf2d8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; @@ -452,7 +453,7 @@ public void testScrollAndSearchAfterWithBigIndex() { } } // search_after with sort with point in time - String pitID; + BytesReference 
pitID; { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 018bf1b7332a2..59373380d539c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.Scroll; @@ -205,7 +206,7 @@ public void testPointInTime() throws Exception { // Open point-in-time reader OpenPointInTimeRequest request = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueSeconds(10)); OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); - String pointInTimeId = response.getPointInTimeId(); + BytesReference pointInTimeId = response.getPointInTimeId(); // Test sort on document IDs assertSearchSlicesWithPointInTime(field, ShardDocSortField.NAME, pointInTimeId, max, numDocs); @@ -217,7 +218,13 @@ public void testPointInTime() throws Exception { } } - private void assertSearchSlicesWithPointInTime(String sliceField, String sortField, String pointInTimeId, int numSlice, int numDocs) { + private void assertSearchSlicesWithPointInTime( + String sliceField, + String sortField, + BytesReference pointInTimeId, + int numSlice, + int numDocs + ) { int totalResults = 0; List keys 
= new ArrayList<>(); for (int id = 0; id < numSlice; id++) { diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index e05bf3ffeda0f..6a53829099223 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -184,6 +184,7 @@ static TransportVersion def(int id) { public static final TransportVersion ADD_RESOURCE_ALREADY_UPLOADED_EXCEPTION = def(8_643_00_0); public static final TransportVersion ESQL_MV_ORDERING_SORTED_ASCENDING = def(8_644_00_0); public static final TransportVersion ESQL_PAGE_MAPPING_TO_ITERATOR = def(8_645_00_0); + public static final TransportVersion BINARY_PIT_ID = def(8_646_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 1f8470b3bcd01..2f307d653f8a4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -661,7 +662,7 @@ private SearchResponse buildSearchResponse( SearchResponseSections internalSearchResponse, ShardSearchFailure[] failures, String scrollId, - String searchContextId + BytesReference searchContextId ) { int numSuccess = successfulOps.get(); int numFailures = failures.length; @@ -693,7 +694,7 @@ public void 
sendSearchResponse(SearchResponseSections internalSearchResponse, At raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); } else { final String scrollId = request.scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; - final String searchContextId; + final BytesReference searchContextId; if (buildPointInTimeFromSearchResults()) { searchContextId = SearchContextId.encode(queryResults.asList(), aliasFilter, minTransportVersion); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java index d97cddaf533e4..c2afb8fc05c46 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClosePointInTimeRequest.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; -import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ParseField; @@ -20,28 +21,29 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.Base64; public class ClosePointInTimeRequest extends ActionRequest implements ToXContentObject { private static final ParseField ID = new ParseField("id"); - private final String id; + private final BytesReference id; public ClosePointInTimeRequest(StreamInput in) throws IOException { super(in); - this.id = in.readString(); + this.id = in.readBytesReference(); } - public ClosePointInTimeRequest(String id) { + public ClosePointInTimeRequest(BytesReference id) { this.id = id; } - public String 
getId() { + public BytesReference getId() { return id; } @Override public ActionRequestValidationException validate() { - if (Strings.isEmpty(id)) { + if (id.length() == 0) { return ValidateActions.addValidationError("id is empty", null); } return null; @@ -50,7 +52,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(id); + out.writeBytesReference(id); } @Override @@ -66,21 +68,21 @@ public static ClosePointInTimeRequest fromXContent(XContentParser parser) throws throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; - String id = null; + BytesReference id = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(ID.getPreferredName())) { token = parser.nextToken(); if (token.isValue() == false) { throw new IllegalArgumentException("the request must contain only [" + ID.getPreferredName() + " field"); } - id = parser.text(); + id = new BytesArray(Base64.getUrlDecoder().decode(parser.text())); } else { throw new IllegalArgumentException( "Unknown parameter [" + parser.currentName() + "] in request body or parameter is of the wrong type[" + token + "] " ); } } - if (Strings.isNullOrEmpty(id)) { + if (id == null || id.length() == 0) { throw new IllegalArgumentException("search context id is is not provided"); } return new ClosePointInTimeRequest(id); diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index fd565ad4878bf..dafcee894c9a6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -9,34 +9,36 @@ package 
org.elasticsearch.action.search; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Base64; import java.util.Objects; public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { - private final String pointInTimeId; + private final BytesReference pointInTimeId; - public OpenPointInTimeResponse(String pointInTimeId) { + public OpenPointInTimeResponse(BytesReference pointInTimeId) { this.pointInTimeId = Objects.requireNonNull(pointInTimeId, "Point in time parameter must be not null"); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(pointInTimeId); + out.writeBytesReference(pointInTimeId); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("id", pointInTimeId); + builder.field("id", Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId))); builder.endObject(); return builder; } - public String getPointInTimeId() { + public BytesReference getPointInTimeId() { return pointInTimeId; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 83a6870d72491..95d22e8a9034e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -8,15 +8,12 @@ package org.elasticsearch.action.search; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; @@ -27,10 +24,7 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterAware; -import java.io.ByteArrayInputStream; import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; @@ -61,26 +55,21 @@ public boolean contains(ShardSearchContextId contextId) { return contextIds.contains(contextId); } - public static String encode( + public static BytesReference encode( List searchPhaseResults, Map aliasFilter, TransportVersion version ) { - final BytesReference bytesReference; - try (var encodedStreamOutput = new BytesStreamOutput()) { - try (var out = new OutputStreamStreamOutput(Base64.getUrlEncoder().wrap(encodedStreamOutput))) { - out.setTransportVersion(version); - TransportVersion.writeVersion(version, out); - out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); - out.writeMap(aliasFilter, StreamOutput::writeWriteable); - } - bytesReference = encodedStreamOutput.bytes(); + try (var out = new BytesStreamOutput()) { + out.setTransportVersion(version); + TransportVersion.writeVersion(version, out); + out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + out.writeMap(aliasFilter, StreamOutput::writeWriteable); + return out.bytes(); } catch (IOException e) { assert false : e; throw new IllegalArgumentException(e); } - final BytesRef bytesRef = 
bytesReference.toBytesRef(); - return new String(bytesRef.bytes, bytesRef.offset, bytesRef.length, StandardCharsets.ISO_8859_1); } private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { @@ -89,11 +78,8 @@ private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult s new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); } - public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, String id) { - try ( - var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); - var in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(decodedInputStream), namedWriteableRegistry) - ) { + public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, BytesReference id) { + try (var in = new NamedWriteableAwareStreamInput(id.streamInput(), namedWriteableRegistry)) { final TransportVersion version = TransportVersion.readVersion(in); in.setTransportVersion(version); final Map shards = Collections.unmodifiableMap( @@ -110,11 +96,8 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } } - public static String[] decodeIndices(String id) { - try ( - var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); - var in = new InputStreamStreamInput(decodedInputStream) - ) { + public static String[] decodeIndices(BytesReference id) { + try (var in = id.streamInput()) { final TransportVersion version = TransportVersion.readVersion(in); in.setTransportVersion(version); final Map shards = Collections.unmodifiableMap( diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index ad1ceefbbe159..e2443566786ae 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,6 +40,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Base64; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -75,7 +77,7 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private final Boolean terminatedEarly; private final int numReducePhases; private final String scrollId; - private final String pointInTimeId; + private final BytesReference pointInTimeId; private final int totalShards; private final int successfulShards; private final int skippedShards; @@ -109,7 +111,7 @@ public SearchResponse(StreamInput in) throws IOException { scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); skippedShards = in.readVInt(); - pointInTimeId = in.readOptionalString(); + pointInTimeId = in.readOptionalBytesReference(); } public SearchResponse( @@ -156,7 +158,7 @@ public SearchResponse( long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters, - String pointInTimeId + BytesReference pointInTimeId ) { this( searchResponseSections.hits, @@ -192,7 +194,7 @@ public SearchResponse( long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters, - String pointInTimeId + BytesReference pointInTimeId ) { this.hits = hits; hits.incRef(); @@ -349,7 +351,7 @@ public String getScrollId() { /** * Returns the encoded string of the search context that the search request is used to executed */ - public String 
pointInTimeId() { + public BytesReference pointInTimeId() { return pointInTimeId; } @@ -419,7 +421,10 @@ public XContentBuilder headerToXContent(XContentBuilder builder, ToXContent.Para builder.field(SCROLL_ID.getPreferredName(), scrollId); } if (pointInTimeId != null) { - builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); + builder.field( + POINT_IN_TIME_ID.getPreferredName(), + Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(pointInTimeId)) + ); } builder.field(TOOK.getPreferredName(), tookInMillis); builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); @@ -462,7 +467,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); out.writeVInt(skippedShards); - out.writeOptionalString(pointInTimeId); + out.writeOptionalBytesReference(pointInTimeId); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java index 8a385be82fb12..1966f7eaa1e69 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/PointInTimeBuilder.java @@ -8,7 +8,10 @@ package org.elasticsearch.search.builder; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.SearchContextId; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -22,6 +25,7 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.Base64; import java.util.Objects; /** @@ -35,7 +39,7 @@ public final class PointInTimeBuilder implements Writeable, ToXContentFragment { static { PARSER = new 
ObjectParser<>(SearchSourceBuilder.POINT_IN_TIME.getPreferredName(), XContentParams::new); - PARSER.declareString((params, id) -> params.encodedId = id, ID_FIELD); + PARSER.declareString((params, id) -> params.encodedId = new BytesArray(Base64.getUrlDecoder().decode(id)), ID_FIELD); PARSER.declareField( (params, keepAlive) -> params.keepAlive = keepAlive, (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE_FIELD.getPreferredName()), @@ -45,32 +49,40 @@ public final class PointInTimeBuilder implements Writeable, ToXContentFragment { } private static final class XContentParams { - private String encodedId; + private BytesReference encodedId; private TimeValue keepAlive; } - private final String encodedId; + private final BytesReference encodedId; private transient SearchContextId searchContextId; // lazily decoded from the encodedId private TimeValue keepAlive; - public PointInTimeBuilder(String pitID) { + public PointInTimeBuilder(BytesReference pitID) { this.encodedId = Objects.requireNonNull(pitID, "Point in time ID must be provided"); } public PointInTimeBuilder(StreamInput in) throws IOException { - encodedId = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.BINARY_PIT_ID)) { + encodedId = in.readBytesReference(); + } else { + encodedId = new BytesArray(Base64.getUrlDecoder().decode(in.readString())); + } keepAlive = in.readOptionalTimeValue(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeString(encodedId); + if (out.getTransportVersion().onOrAfter(TransportVersions.BINARY_PIT_ID)) { + out.writeBytesReference(encodedId); + } else { + out.writeString(Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(encodedId))); + } out.writeOptionalTimeValue(keepAlive); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(ID_FIELD.getPreferredName(), encodedId); + builder.field(ID_FIELD.getPreferredName(), 
Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(encodedId))); if (keepAlive != null) { builder.field(KEEP_ALIVE_FIELD.getPreferredName(), keepAlive.getStringRep()); } @@ -88,7 +100,7 @@ public static PointInTimeBuilder fromXContent(XContentParser parser) throws IOEx /** * Returns the encoded id of this point in time */ - public String getEncodedId() { + public BytesReference getEncodedId() { return encodedId; } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 2023ee2e8d4b6..488c956c187d5 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -440,7 +440,7 @@ public void source(SearchSourceBuilder source) { // of shard-level search requests. However, we need to assign as a dummy PIT instead of null as we verify PIT for // slice requests on data nodes. 
source = source.shallowCopy(); - source.pointInTimeBuilder(new PointInTimeBuilder("")); + source.pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)); } this.source = source; } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 8bfd61b8d5b32..70c6719c67d1b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -10,6 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -83,7 +85,9 @@ public OriginalIndices getOriginalIndices(int shardIndex) { @Override public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; - String searchContextId = getRequest().pointInTimeBuilder() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; + BytesReference searchContextId = getRequest().pointInTimeBuilder() != null + ? 
new BytesArray(TransportSearchHelper.buildScrollId(queryResults)) + : null; var existing = searchResponse.getAndSet( new SearchResponse( internalSearchResponse, diff --git a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java index 0b9abb44b71de..dda977565af45 100644 --- a/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/RestOpenPointInTimeActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.rest.FakeRestRequest; @@ -30,7 +31,7 @@ public void testMaxConcurrentSearchRequests() { verifyingClient.setExecuteVerifier(((actionType, transportRequest) -> { assertThat(transportRequest, instanceOf(OpenPointInTimeRequest.class)); transportRequests.add((OpenPointInTimeRequest) transportRequest); - return new OpenPointInTimeResponse("n/a"); + return new OpenPointInTimeResponse(new BytesArray("n/a")); })); { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 32091780484fa..32157e09e628f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.query.IdsQueryBuilder; @@ -64,7 +65,7 @@ public void testEncode() { aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); } } - final String id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); + final BytesReference id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); final SearchContextId context = SearchContextId.decode(namedWriteableRegistry, id); assertThat(context.shards().keySet(), hasSize(3)); assertThat(context.aliasFilter(), equalTo(aliasFilters)); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 95750cf6f412d..d8c7d3e134571 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.ArrayUtils; @@ -38,7 +39,9 @@ import org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import static java.util.Collections.emptyMap; @@ -331,7 +334,9 @@ public void testValidate() throws IOException { { // Reader context with scroll SearchRequest searchRequest = new SearchRequest().source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("id")) + new SearchSourceBuilder().pointInTimeBuilder( + new PointInTimeBuilder(new 
BytesArray(Base64.getUrlEncoder().encode("id".getBytes(StandardCharsets.UTF_8)))) + ) ).scroll(TimeValue.timeValueMillis(randomIntBetween(1, 100))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -505,7 +510,7 @@ public void testValidate() throws IOException { new SearchSourceBuilder().rankBuilder(new TestRankBuilder(100)) .query(QueryBuilders.termQuery("field", "term")) .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0f }, 10, 100, null))) - .pointInTimeBuilder(new PointInTimeBuilder("test")) + .pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("test"))) ); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -538,7 +543,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest("test").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY)) ); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); @@ -550,7 +555,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED) - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); @@ -558,7 +563,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().routing("route1") - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new 
PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); @@ -566,7 +571,7 @@ public void testValidate() throws IOException { } { SearchRequest searchRequest = new SearchRequest().preference("pref1") - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(BytesArray.EMPTY))); ActionRequestValidationException validationErrors = searchRequest.validate(); assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index b9053bd5e4078..26c3f5831ec8c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -44,6 +44,8 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -1840,7 +1842,7 @@ public void testMinimalSearchSourceInShardRequests() { } indicesAdmin().prepareRefresh("test").get(); - String pitId = client().execute( + BytesReference pitId = client().execute( TransportOpenPointInTimeAction.TYPE, new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(10)) ).actionGet().getPointInTimeId(); @@ -1862,7 +1864,7 @@ public void testMinimalSearchSourceInShardRequests() { for (ShardSearchRequest shardRequest : shardRequests) { 
assertNotNull(shardRequest.source()); assertNotNull(shardRequest.source().pointInTimeBuilder()); - assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo("")); + assertThat(shardRequest.source().pointInTimeBuilder().getEncodedId(), equalTo(BytesArray.EMPTY)); } } diff --git a/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java index 7c26ddbbbdb7e..ebf03f544fca1 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/PointInTimeBuilderTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.builder; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -27,7 +28,7 @@ protected Writeable.Reader instanceReader() { @Override protected PointInTimeBuilder createTestInstance() { - final PointInTimeBuilder pointInTime = new PointInTimeBuilder(randomAlphaOfLength(20)); + final PointInTimeBuilder pointInTime = new PointInTimeBuilder(new BytesArray(randomAlphaOfLength(20))); if (randomBoolean()) { pointInTime.setKeepAlive(randomTimeValue()); } diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index c2c544c52b202..d92cc202980d2 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; 
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentHelper; @@ -62,6 +63,8 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import java.util.List; import java.util.Map; @@ -436,7 +439,7 @@ public void testToXContentWithPointInTime() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); TimeValue keepAlive = randomBoolean() ? TimeValue.timeValueHours(1) : null; - searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder("id").setKeepAlive(keepAlive)); + searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("id")).setKeepAlive(keepAlive)); XContentBuilder builder = XContentFactory.contentBuilder(xContentType); searchSourceBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); BytesReference bytes = BytesReference.bytes(builder); @@ -444,7 +447,10 @@ public void testToXContentWithPointInTime() throws IOException { assertEquals(1, sourceAsMap.size()); @SuppressWarnings("unchecked") Map pit = (Map) sourceAsMap.get("pit"); - assertEquals("id", pit.get("id")); + assertEquals( + new String(Base64.getUrlEncoder().encode("id".getBytes(StandardCharsets.UTF_8)), StandardCharsets.ISO_8859_1), + pit.get("id") + ); if (keepAlive != null) { assertEquals("1h", pit.get("keep_alive")); assertEquals(2, pit.size()); @@ -771,7 +777,7 @@ public void testSearchSectionsUsageCollection() throws IOException { // these are not correct runtime mappings but they are counted compared to empty object searchSourceBuilder.runtimeMappings(Collections.singletonMap("field", "keyword")); searchSourceBuilder.knnSearch(List.of(new KnnSearchBuilder("field", new float[] {}, 2, 5, null))); - searchSourceBuilder.pointInTimeBuilder(new 
PointInTimeBuilder("pitid")); + searchSourceBuilder.pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("pitid"))); searchSourceBuilder.docValueField("field"); searchSourceBuilder.storedField("field"); searchSourceBuilder.explain(true); diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index ba1453e464c64..cbbb48711cc8d 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; @@ -97,7 +98,7 @@ private IndexSettings createIndexSettings(IndexVersion indexVersionCreated) { private ShardSearchRequest createPointInTimeRequest(int shardIndex, int numShards) { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true) - .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("1m"))); + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("1m")))); return new ShardSearchRequest( OriginalIndices.NONE, searchRequest, diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index 3dfd9af485241..b3e9c10aed886 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -11,6 +11,7 @@ import 
org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; @@ -44,7 +45,9 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Base64; import java.util.List; import java.util.Map; import java.util.function.Supplier; @@ -379,7 +382,9 @@ public static SearchSourceBuilder randomSearchSourceBuilder( builder.collapse(randomCollapseBuilder.get()); } if (randomBoolean()) { - PointInTimeBuilder pit = new PointInTimeBuilder(randomAlphaOfLengthBetween(3, 10)); + PointInTimeBuilder pit = new PointInTimeBuilder( + new BytesArray(Base64.getUrlEncoder().encode(randomAlphaOfLengthBetween(3, 10).getBytes(StandardCharsets.UTF_8))) + ); if (randomBoolean()) { pit.setKeepAlive(TimeValue.timeValueMinutes(randomIntBetween(1, 60))); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 91bee1ee253e9..2639aafae1300 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.client.Response; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.text.Text; @@ -52,6 +53,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Base64; 
import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -201,7 +203,7 @@ public static SearchResponse parseInnerSearchResponse(XContentParser parser) thr int totalShards = -1; int skippedShards = 0; // 0 for BWC String scrollId = null; - String searchContextId = null; + BytesReference searchContextId = null; List failures = new ArrayList<>(); SearchResponse.Clusters clusters = SearchResponse.Clusters.EMPTY; for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { @@ -211,7 +213,7 @@ public static SearchResponse parseInnerSearchResponse(XContentParser parser) thr if (SearchResponse.SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) { scrollId = parser.text(); } else if (SearchResponse.POINT_IN_TIME_ID.match(currentFieldName, parser.getDeprecationHandler())) { - searchContextId = parser.text(); + searchContextId = new BytesArray(Base64.getUrlDecoder().decode(parser.text())); } else if (SearchResponse.TOOK.match(currentFieldName, parser.getDeprecationHandler())) { tookInMillis = parser.longValue(); } else if (SearchResponse.TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) { diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 1fafa8462c694..56f957ff488d5 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; @@ -239,7 +240,7 @@ protected SearchResponseIterator assertBlockingIterator( int numFailures, int progressStep ) throws Exception { - final String pitId; + final BytesReference pitId; final SubmitAsyncSearchRequest request; if (randomBoolean()) { OpenPointInTimeRequest openPIT = new OpenPointInTimeRequest(indexName).keepAlive(TimeValue.timeValueMinutes(between(5, 10))); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index cce3cdeb97961..2cfa4db37ee07 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.get.GetResult; @@ -44,7 +45,7 @@ // search and multi-search hence the code repetition public class PITAwareQueryClient extends BasicQueryClient { - private String pitId; + private BytesReference pitId; private final TimeValue keepAlive; private final QueryBuilder filter; @@ -114,7 +115,10 @@ private void makeRequestPITCompatible(SearchRequest request) { } // listener handing the extraction of new PIT and closing in case of exceptions - private ActionListener pitListener(Function pitIdExtractor, ActionListener listener) { + private ActionListener pitListener( + Function pitIdExtractor, + ActionListener listener + ) { return wrap(r -> 
{ // get pid pitId = pitIdExtractor.apply(r); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index afb9b590914dd..943d1275364fb 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.TestCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -192,7 +194,7 @@ private List mockCriteria() { private class ESMockClient extends NoOpClient { protected final CircuitBreaker circuitBreaker; - private final String pitId = "test_pit_id"; + private final BytesReference pitId = new BytesArray("test_pit_id"); ESMockClient(ThreadPool threadPool, CircuitBreaker circuitBreaker) { super(threadPool); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index 0bdb88592ce0f..c0e5d398d6508 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -21,6 +21,8 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import 
org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -179,7 +181,7 @@ public void fetchHits(Iterable> refs, ActionListener { assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); @@ -277,7 +280,9 @@ public void testPointInTimeWithDeletedIndices() { IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + .actionGet() + .getPointInTimeId(); try { indicesAdmin().prepareDelete("index-1").get(); // Return partial results if allow partial search result is allowed @@ -312,7 +317,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest("test-*").indicesOptions( IndicesOptions.strictExpandOpenAndForbidClosed() ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) .actionGet() .getPointInTimeId(); try { @@ -329,7 +334,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { final OpenPointInTimeRequest openPointInTimeRequest = new OpenPointInTimeRequest("test-*").keepAlive( TimeValue.timeValueMinutes(2) ); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest) .actionGet() .getPointInTimeId(); try { diff 
--git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index e934cc3fcc8b2..d1db706562a37 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -83,7 +84,7 @@ protected Collection> getPlugins() { return pluginList(FrozenIndices.class, LocalStateCompositeXPackPlugin.class); } - String openReaders(TimeValue keepAlive, String... indices) { + BytesReference openReaders(TimeValue keepAlive, String... 
indices) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).indicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); @@ -145,7 +146,7 @@ public void testCloseFreezeAndOpen() throws Exception { } client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); - String pitId = openReaders(TimeValue.timeValueMinutes(1), indexName); + BytesReference pitId = openReaders(TimeValue.timeValueMinutes(1), indexName); try { for (int from = 0; from < 3; from++) { assertResponse( diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index e3b631ba69c8a..c50fe50db8b40 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.TransportOpenPointInTimeAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexService; @@ -141,7 +142,7 @@ public void testRetryPointInTime() throws Exception { final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indexName).indicesOptions( IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED ).keepAlive(TimeValue.timeValueMinutes(2)); - final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, 
openRequest).actionGet().getPointInTimeId(); + final BytesReference pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertThat(resp.pointInTimeId(), equalTo(pitId)); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index a40f21c0de08d..a21e3e6beabce 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -38,6 +38,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -484,7 +485,7 @@ public void onFailure(Exception e) { }); } - private static ActionListener closingPitBefore(Client client, String pointInTimeId, ActionListener listener) { + private static ActionListener closingPitBefore(Client client, BytesReference pointInTimeId, ActionListener listener) { return new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -498,7 +499,7 @@ public void onFailure(Exception e) { }; } - private static void closePit(Client client, String pointInTimeId, Runnable onCompletion) { + private static void closePit(Client client, BytesReference pointInTimeId, Runnable 
onCompletion) { client.execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pointInTimeId), new ActionListener<>() { @Override public void onResponse(ClosePointInTimeResponse response) { @@ -522,14 +523,14 @@ public void onFailure(Exception e) { * The maintenance task, once it has opened its PIT and started running so that it has all the state it needs to do its job. */ private class RunningPeriodicMaintenanceTask implements Runnable { - private final String pointInTimeId; + private final BytesReference pointInTimeId; private final RefCountingListener listeners; private final Instant expirationTime; private final Map> existingSnapshots; private final Set existingRepositories; RunningPeriodicMaintenanceTask( - String pointInTimeId, + BytesReference pointInTimeId, ActionListener listener, Instant expirationTime, Map> existingSnapshots, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index d105b616c57f1..1be8f543ebcb3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -1303,7 +1304,7 @@ public void testReaderId() throws Exception { } refresh(); - String pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); + BytesReference pitId = openPointInTime("user1", 
TimeValue.timeValueMinutes(1), "test"); SearchResponse response = null; try { for (int from = 0; from < numVisible; from++) { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 591b20bd82f47..849f5d1a48c5e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.internal.Requests; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -1157,7 +1158,7 @@ public void testScroll() throws Exception { } } - static String openPointInTime(String userName, TimeValue keepAlive, String... indices) { + static BytesReference openPointInTime(String userName, TimeValue keepAlive, String... 
indices) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().filterWithHeader( Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(userName, USERS_PASSWD)) @@ -1178,7 +1179,7 @@ public void testPointInTimeId() throws Exception { } refresh("test"); - String pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); + BytesReference pitId = openPointInTime("user1", TimeValue.timeValueMinutes(1), "test"); try { for (int from = 0; from < numDocs; from++) { assertResponse( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 861b21403b2b0..ae33c4e5e31e8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -94,6 +94,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -942,7 +944,7 @@ public void testUserWithNoRolesOpenPointInTimeWithRemoteIndices() { } public void testUserWithNoRolesCanClosePointInTime() { - final ClosePointInTimeRequest closePointInTimeRequest = new ClosePointInTimeRequest(randomAlphaOfLength(8)); + final ClosePointInTimeRequest closePointInTimeRequest = new ClosePointInTimeRequest(new BytesArray(randomAlphaOfLength(8))); final Authentication authentication = createAuthentication(new User("test user")); 
mockEmptyMetadata(); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); @@ -3641,7 +3643,7 @@ private static class TestSearchPhaseResult extends SearchPhaseResult { } } - private static String createEncodedPIT(Index index) { + private static BytesReference createEncodedPIT(Index index) { DiscoveryNode node1 = DiscoveryNodeUtils.create("node_1"); TestSearchPhaseResult testSearchPhaseResult1 = new TestSearchPhaseResult(new ShardSearchContextId("a", 1), node1); testSearchPhaseResult1.setSearchShardTarget( diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index 1d7a3cdd836ff..9cf60ec3bb2e4 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -160,7 +160,7 @@ private void searchWithPointInTime(SearchRequest search, ActionListener { - String pitId = openPointInTimeResponse.getPointInTimeId(); + BytesReference pitId = openPointInTimeResponse.getPointInTimeId(); search.indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); search.indices(Strings.EMPTY_ARRAY); search.source().pointInTimeBuilder(new PointInTimeBuilder(pitId)); @@ -176,14 +176,14 @@ private void searchWithPointInTime(SearchRequest search, ActionListener listener) { + private static void closePointInTimeAfterError(Client client, BytesReference pointInTimeId, Exception e, ActionListener listener) { closePointInTime(client, pointInTimeId, wrap(r -> listener.onFailure(e), closeError -> { e.addSuppressed(closeError); listener.onFailure(e); })); } - public static void closePointInTime(Client client, String pointInTimeId, ActionListener listener) { + public static void closePointInTime(Client client, BytesReference pointInTimeId, ActionListener listener) { if (pointInTimeId != null) { // request should 
not be made with the parent task assigned because the parent task might already be canceled client = client instanceof ParentTaskAssigningClient wrapperClient ? wrapperClient.unwrap() : client; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java index 86cb54a33bb5a..10d6b04d7505c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/CancellationTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.tasks.TaskCancelHelper; import org.elasticsearch.tasks.TaskCancelledException; @@ -171,7 +173,7 @@ public void testCancellationDuringSearch(String query) throws InterruptedExcepti ClusterService mockClusterService = mockClusterService(nodeId); String[] indices = new String[] { "endgame" }; - String pitId = randomAlphaOfLength(10); + BytesReference pitId = new BytesArray(randomAlphaOfLength(10)); // Emulation of field capabilities FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index e9e06be2590a2..f77148de8d4a2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -29,6 +29,7 @@ import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; @@ -450,7 +451,7 @@ private void closePointInTime(String name) { return; } - String oldPit = pit.getEncodedId(); + BytesReference oldPit = pit.getEncodedId(); ClosePointInTimeRequest closePitRequest = new ClosePointInTimeRequest(oldPit); ClientHelper.executeWithHeadersAsync( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 7cb71ec2545d6..04263277d6615 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.CompositeBytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -171,7 +173,7 @@ public void testPitInjection() throws InterruptedException { ); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", 
response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -184,15 +186,15 @@ public void testPitInjection() throws InterruptedException { assertEquals(0L, client.getPitContextCounter()); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id++"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+++"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -201,15 +203,15 @@ public void testPitInjection() throws InterruptedException { assertEquals(0L, client.getPitContextCounter()); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id++"), response.pointInTimeId()); }); this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { - assertEquals("the_pit_id+++", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+++"), response.pointInTimeId()); }); assertEquals(1L, client.getPitContextCounter()); @@ -357,7 +359,7 @@ public void testDisablePit() throws InterruptedException { this.assertAsync(listener -> indexer.doNextSearch(0, listener), response -> { if (pitEnabled) { - 
assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); } else { assertNull(response.pointInTimeId()); } @@ -370,7 +372,7 @@ public void testDisablePit() throws InterruptedException { if (pitEnabled) { assertNull(response.pointInTimeId()); } else { - assertEquals("the_pit_id+", response.pointInTimeId()); + assertEquals(new BytesArray("the_pit_id+"), response.pointInTimeId()); } }); } @@ -446,7 +448,7 @@ public void testHandlePitIndexNotFound() throws InterruptedException { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("deleted-index").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_on_deleted_index")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("the_pit_id_on_deleted_index"))) ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); this.assertAsync(listener -> indexer.doSearch(namedSearchRequest, listener), response -> { @@ -460,7 +462,7 @@ public void testHandlePitIndexNotFound() throws InterruptedException { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("essential-deleted-index").source( - new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_essential-deleted-index")) + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(new BytesArray("the_pit_id_essential-deleted-index"))) ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); indexer.doSearch(namedSearchRequest, ActionListener.wrap(r -> fail("expected a failure, got response"), e -> { 
@@ -541,7 +543,7 @@ protected void if (request instanceof OpenPointInTimeRequest) { if (pitSupported) { pitContextCounter.incrementAndGet(); - OpenPointInTimeResponse response = new OpenPointInTimeResponse("the_pit_id"); + OpenPointInTimeResponse response = new OpenPointInTimeResponse(new BytesArray("the_pit_id")); listener.onResponse((Response) response); } else { listener.onFailure(new ActionNotFoundTransportException("_pit")); @@ -556,13 +558,13 @@ protected void } else if (request instanceof SearchRequest searchRequest) { // if pit is used and deleted-index is given throw index not found if (searchRequest.pointInTimeBuilder() != null - && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_on_deleted_index")) { + && searchRequest.pointInTimeBuilder().getEncodedId().equals(new BytesArray("the_pit_id_on_deleted_index"))) { listener.onFailure(new IndexNotFoundException("deleted-index")); return; } if ((searchRequest.pointInTimeBuilder() != null - && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_essential-deleted-index")) + && searchRequest.pointInTimeBuilder().getEncodedId().equals(new BytesArray("the_pit_id_essential-deleted-index"))) || (searchRequest.indices().length > 0 && searchRequest.indices()[0].equals("essential-deleted-index"))) { listener.onFailure(new IndexNotFoundException("essential-deleted-index")); return; @@ -570,7 +572,7 @@ protected void // throw search context missing for the 4th run if (searchRequest.pointInTimeBuilder() != null - && "the_pit_id+++".equals(searchRequest.pointInTimeBuilder().getEncodedId())) { + && new BytesArray("the_pit_id+++").equals(searchRequest.pointInTimeBuilder().getEncodedId())) { listener.onFailure(new SearchContextMissingException(new ShardSearchContextId("sc_missing", 42))); } else { ActionListener.respondAndRelease( @@ -596,7 +598,9 @@ protected void ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY, // copy the pit from the request - 
searchRequest.pointInTimeBuilder() != null ? searchRequest.pointInTimeBuilder().getEncodedId() + "+" : null + searchRequest.pointInTimeBuilder() != null + ? CompositeBytesReference.of(searchRequest.pointInTimeBuilder().getEncodedId(), new BytesArray("+")) + : null ) ); From a577538da6a061a71c713805638e37d9123c1b0a Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Apr 2024 13:11:13 +0100 Subject: [PATCH 021/244] [ML] Add start model import message (#107941) The new log message is "[model_id] starting model import" --- .../ml/packageloader/action/ModelImporter.java | 15 +-------------- .../action/TransportLoadTrainedModelPackage.java | 2 ++ .../TransportLoadTrainedModelPackageTests.java | 10 +++++++--- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java index 69cb974a4514f..33d5d5982d2b0 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -19,8 +18,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.xpack.core.common.notifications.Level; -import org.elasticsearch.xpack.core.ml.action.AuditMlNotificationAction; import 
org.elasticsearch.xpack.core.ml.action.PutTrainedModelDefinitionPartAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelVocabularyAction; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ModelPackageConfig; @@ -59,7 +56,7 @@ public void doImport() throws URISyntaxException, IOException, ElasticsearchStat if (Strings.isNullOrEmpty(config.getVocabularyFile()) == false) { uploadVocabulary(); - writeDebugNotification(modelId, format("imported model vocabulary [%s]", config.getVocabularyFile())); + logger.debug(() -> format("[%s] imported model vocabulary [%s]", modelId, config.getVocabularyFile())); } URI uri = ModelLoaderUtils.resolvePackageLocation( @@ -152,14 +149,4 @@ private void ex client.execute(action, request).actionGet(); } - - private void writeDebugNotification(String modelId, String message) { - client.execute( - AuditMlNotificationAction.INSTANCE, - new AuditMlNotificationAction.Request(AuditMlNotificationAction.AuditType.INFERENCE, modelId, message, Level.INFO), - ActionListener.noop() - ); - - logger.debug(() -> format("[%s] %s", modelId, message)); - } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index f31f01b7c2aae..b0544806d52bd 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -141,6 +141,8 @@ static void importModel( try { final long relativeStartNanos = System.nanoTime(); + logAndWriteNotificationAtInfo(auditClient, modelId, "starting model import"); + modelImporter.doImport(); final long totalRuntimeNanos = System.nanoTime() - relativeStartNanos; diff 
--git a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java index a4d7245acba6f..1e10ea48d03db 100644 --- a/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java +++ b/x-pack/plugin/ml-package-loader/src/test/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackageTests.java @@ -32,6 +32,7 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; public class TransportLoadTrainedModelPackageTests extends ESTestCase { @@ -53,7 +54,9 @@ public void testSendsFinishedUploadNotification() { ); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); - verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + // 2 notifications- the start and finish messages + verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + // Only the last message is captured assertThat(notificationArg.getValue().getMessage(), CoreMatchers.containsString("finished model import after")); } @@ -145,8 +148,9 @@ private void assertNotificationAndOnFailure(Exception thrownException, Elasticse TransportLoadTrainedModelPackage.importModel(client, taskManager, createRequestWithWaiting(), uploader, listener, task); var notificationArg = ArgumentCaptor.forClass(AuditMlNotificationAction.Request.class); - verify(client).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); - assertThat(notificationArg.getValue().getMessage(), is(message)); + // 2 notifications- the 
starting message and the failure + verify(client, times(2)).execute(eq(AuditMlNotificationAction.INSTANCE), notificationArg.capture(), any()); + assertThat(notificationArg.getValue().getMessage(), is(message)); // the last message is captured var receivedException = (ElasticsearchStatusException) failureRef.get(); assertThat(receivedException.toString(), is(onFailureException.toString())); From 4b5c5e2dedaa902363dd1ff434b3c789fb29ed08 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Mon, 29 Apr 2024 14:27:20 +0200 Subject: [PATCH 022/244] Update BUCKET docs in source (#108005) This applies a review proposed changes to the source, so that they're synchronized to the generated output. --- .../esql/functions/examples/bucket.asciidoc | 2 +- .../functions/kibana/definition/bucket.json | 3 ++- .../expression/function/grouping/Bucket.java | 23 +++++++++++++------ 3 files changed, 19 insertions(+), 9 deletions(-) diff --git a/docs/reference/esql/functions/examples/bucket.asciidoc b/docs/reference/esql/functions/examples/bucket.asciidoc index f66f737b7d4b5..e1bba0529d7db 100644 --- a/docs/reference/esql/functions/examples/bucket.asciidoc +++ b/docs/reference/esql/functions/examples/bucket.asciidoc @@ -108,7 +108,6 @@ include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg] |=== include::{esql-specs}/bucket.csv-spec[tag=bucket_in_agg-result] |=== - `BUCKET` may be used in both the aggregating and grouping part of the <> command provided that in the aggregating part the function is referenced by an alias defined in the @@ -121,3 +120,4 @@ include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression] |=== include::{esql-specs}/bucket.csv-spec[tag=reuseGroupingFunctionWithExpression-result] |=== + diff --git a/docs/reference/esql/functions/kibana/definition/bucket.json b/docs/reference/esql/functions/kibana/definition/bucket.json index 986c0e8f91d33..7141ca4c27443 100644 --- a/docs/reference/esql/functions/kibana/definition/bucket.json +++ 
b/docs/reference/esql/functions/kibana/definition/bucket.json @@ -943,6 +943,7 @@ "FROM employees\n| STATS COUNT(*) by bs = BUCKET(salary, 20, 25324, 74999)\n| SORT bs", "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS c = COUNT(1) BY b = BUCKET(salary, 5000.)\n| SORT b", "FROM sample_data \n| WHERE @timestamp >= NOW() - 1 day and @timestamp < NOW()\n| STATS COUNT(*) BY bucket = BUCKET(@timestamp, 25, NOW() - 1 day, NOW())", - "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket" + "FROM employees\n| WHERE hire_date >= \"1985-01-01T00:00:00Z\" AND hire_date < \"1986-01-01T00:00:00Z\"\n| STATS AVG(salary) BY bucket = BUCKET(hire_date, 20, \"1985-01-01T00:00:00Z\", \"1986-01-01T00:00:00Z\")\n| SORT bucket", + "FROM employees\n| STATS s1 = b1 + 1, s2 = BUCKET(salary / 1000 + 999, 50.) 
+ 2 BY b1 = BUCKET(salary / 100 + 99, 50.), b2 = BUCKET(salary / 1000 + 999, 50.)\n| SORT b1, b2\n| KEEP s1, b1, s2, b2" ] } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java index 218d469d626f9..32073d830841f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/grouping/Bucket.java @@ -93,7 +93,7 @@ public class Bucket extends GroupingFunction implements Validatable, TwoOptional @Example( description = """ `BUCKET` can work in two modes: one in which the size of the bucket is computed - based on a buckets count recommendation (four parameters) and a range and + based on a buckets count recommendation (four parameters) and a range, and another in which the bucket size is provided directly (two parameters). Using a target number of buckets, a start of a range, and an end of a range, @@ -127,8 +127,8 @@ another in which the bucket size is provided directly (two parameters). @Example(description = """ If the desired bucket size is known in advance, simply provide it as the second argument, leaving the range out:""", file = "bucket", tag = "docsBucketWeeklyHistogramWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, its type must be - of a time duration or date period type."""), + NOTE: When providing the bucket size as the second parameter, it must be a time + duration or date period."""), @Example( description = "`BUCKET` can also operate on numeric fields. For example, to create a salary histogram:", file = "bucket", @@ -138,10 +138,10 @@ another in which the bucket size is provided directly (two parameters). You have to find the `min` and `max` separately. 
{esql} doesn't yet have an easy way to do that automatically.""" ), @Example(description = """ - If the desired bucket size is known in advance, simply provide it as the second - argument, leaving the range out:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ - NOTE: When providing the bucket size as the second parameter, its type must be - of a floating type."""), + The range can be omitted if the desired bucket size is known in advance. Simply + provide it as the second argument:""", file = "bucket", tag = "docsBucketNumericWithSpan", explanation = """ + NOTE: When providing the bucket size as the second parameter, it must be + of a floating point type."""), @Example( description = "Create hourly buckets for the last 24 hours, and calculate the number of events per hour:", file = "bucket", @@ -151,6 +151,15 @@ another in which the bucket size is provided directly (two parameters). description = "Create monthly buckets for the year 1985, and calculate the average salary by hiring month", file = "bucket", tag = "bucket_in_agg" + ), + @Example( + description = """ + `BUCKET` may be used in both the aggregating and grouping part of the + <> command provided that in the aggregating + part the function is referenced by an alias defined in the + grouping part, or that it is invoked with the exact same expression:""", + file = "bucket", + tag = "reuseGroupingFunctionWithExpression" ) } ) public Bucket( From 89a6c7e6285aa275ca6f38174693c647d8d88075 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:37:29 +0300 Subject: [PATCH 023/244] [TEST] ignore transient error in overlapping downsample (#108001) * Add retries in concurrent downsampling action * [TEST] ignore transient error in overlapping downsample --- .../downsample/DownsampleActionSingleNodeTests.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git 
a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index d68f6e8d11f81..80bb0368a1afc 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -555,8 +555,13 @@ public void onFailure(Exception e) { } }); - // Downsample with retries, in case the downsampled index is not ready. - assertBusy(() -> downsample(sourceIndex, downsampleIndex, config), 120, TimeUnit.SECONDS); + assertBusy(() -> { + try { + client().execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)); + } catch (ElasticsearchException e) { + fail("transient failure due to overlapping downsample operations"); + } + }); // We must wait until the in-progress downsample ends, otherwise data will not be cleaned up assertBusy(() -> assertTrue("In progress downsample did not complete", downsampleListener.success), 60, TimeUnit.SECONDS); From b0283eb6cf14cbc5227dd5158c462ada598802af Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Mon, 29 Apr 2024 15:50:59 +0300 Subject: [PATCH 024/244] Conditionally display effective retention (#107964) --- .../get/GetComponentTemplateAction.java | 9 +++++++- .../get/GetComposableIndexTemplateAction.java | 13 +++++++----- .../post/SimulateIndexTemplateResponse.java | 20 +++++++----------- .../datastreams/GetDataStreamAction.java | 8 ++++++- .../ExplainDataStreamLifecycleAction.java | 8 ++++++- .../GetDataStreamLifecycleAction.java | 7 ++++++- .../cluster/metadata/DataStreamLifecycle.java | 13 ++++++++++++ .../metadata/DataStreamLifecycleTests.java | 21 +++++++++++++++++++ 8 files changed, 77 insertions(+), 22 deletions(-) diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3bf9c3715b29a..8ef1df3d29a58 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -196,7 +197,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), componentTemplate.getKey()); builder.field(COMPONENT_TEMPLATE.getPreferredName()); - componentTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + componentTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index f2fcbeff73c37..07ebfe123c98f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -16,6 +16,7 
@@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -157,10 +158,6 @@ public Map indexTemplates() { return indexTemplates; } - public RolloverConfiguration getRolloverConfiguration() { - return rolloverConfiguration; - } - public DataStreamGlobalRetention getGlobalRetention() { return globalRetention; } @@ -199,7 +196,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field(NAME.getPreferredName(), indexTemplate.getKey()); builder.field(INDEX_TEMPLATE.getPreferredName()); - indexTemplate.getValue().toXContent(builder, params, rolloverConfiguration, globalRetention); + indexTemplate.getValue() + .toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); builder.endObject(); } builder.endArray(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 52d40626f97ed..6985e86fb287a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,22 +70,10 @@ public SimulateIndexTemplateResponse( this.globalRetention = globalRetention; } - public Template getResolvedTemplate() { - return resolvedTemplate; - } - - public Map> getOverlappingTemplates() { - return overlappingTemplates; - } - public RolloverConfiguration getRolloverConfiguration() { return rolloverConfiguration; } - public DataStreamGlobalRetention getGlobalRetention() { - return globalRetention; - } - public SimulateIndexTemplateResponse(StreamInput in) throws IOException { super(in); resolvedTemplate = in.readOptionalWriteable(Template::new); @@ -132,7 +121,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); if (this.resolvedTemplate != null) { builder.field(TEMPLATE.getPreferredName()); - this.resolvedTemplate.toXContent(builder, params, rolloverConfiguration, globalRetention); + this.resolvedTemplate.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } if (this.overlappingTemplates != null) { builder.startArray(OVERLAPPING.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index f2a581472303b..01ce7cbd3346b 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAutoShardingEvent; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -546,7 +547,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.startArray(DATA_STREAMS_FIELD.getPreferredName()); for (DataStreamInfo dataStream : dataStreams) { - dataStream.toXContent(builder, params, rolloverConfiguration, globalRetention); + dataStream.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endArray(); builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java index 17d33ae9167fd..36fc66c67c842 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/ExplainDataStreamLifecycleAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -212,7 +213,12 @@ public Iterator toXContentChunked(ToXContent.Params outerP return builder; }), Iterators.map(indices.iterator(), explainIndexDataLifecycle -> (builder, params) -> { builder.field(explainIndexDataLifecycle.getIndex()); - explainIndexDataLifecycle.toXContent(builder, outerParams, rolloverConfiguration, globalRetention); + 
explainIndexDataLifecycle.toXContent( + builder, + DataStreamLifecycle.maybeAddEffectiveRetentionParams(outerParams), + rolloverConfiguration, + globalRetention + ); return builder; }), Iterators.single((builder, params) -> { builder.endObject(); diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java index 1c9dbb0575a1d..c7384e7003963 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/lifecycle/GetDataStreamLifecycleAction.java @@ -174,7 +174,12 @@ public XContentBuilder toXContent( builder.field(NAME_FIELD.getPreferredName(), dataStreamName); if (lifecycle != null) { builder.field(LIFECYCLE_FIELD.getPreferredName()); - lifecycle.toXContent(builder, params, rolloverConfiguration, globalRetention); + lifecycle.toXContent( + builder, + org.elasticsearch.cluster.metadata.DataStreamLifecycle.maybeAddEffectiveRetentionParams(params), + rolloverConfiguration, + globalRetention + ); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 9e23ffed6e8c5..9c89945046126 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -24,11 +24,13 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; 
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -355,6 +357,17 @@ public static DataStreamLifecycle fromXContent(XContentParser parser) throws IOE return PARSER.parse(parser, null); } + /** + * Adds a retention param to signal that this serialisation should include the effective retention metadata + */ + public static ToXContent.Params maybeAddEffectiveRetentionParams(ToXContent.Params params) { + boolean shouldAddEffectiveRetention = Objects.equals(params.param(RestRequest.PATH_RESTRICTED), "serverless"); + return new DelegatingMapParams( + Map.of(INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, Boolean.toString(shouldAddEffectiveRetention)), + params + ); + } + public static Builder newBuilder(DataStreamLifecycle lifecycle) { return new Builder().dataRetention(lifecycle.getDataRetention()) .downsampling(lifecycle.getDownsampling()) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java index f6f915b0e1a3d..a2b18c3328fd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamLifecycleTests.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -38,6 +39,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DATA_STREAM_CONFIGURATION; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.DEFAULT_GLOBAL_RETENTION; import static 
org.elasticsearch.cluster.metadata.DataStreamLifecycle.RetentionSource.MAX_GLOBAL_RETENTION; +import static org.elasticsearch.rest.RestRequest.PATH_RESTRICTED; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -343,6 +345,25 @@ public void testEffectiveRetention() { } } + public void testEffectiveRetentionParams() { + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams(new ToXContent.MapParams(Map.of())); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "not-serverless")) + ); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(false)); + } + { + ToXContent.Params params = DataStreamLifecycle.maybeAddEffectiveRetentionParams( + new ToXContent.MapParams(Map.of(PATH_RESTRICTED, "serverless")) + ); + assertThat(params.paramAsBoolean(DataStreamLifecycle.INCLUDE_EFFECTIVE_RETENTION_PARAM_NAME, false), equalTo(true)); + } + } + @Nullable public static DataStreamLifecycle randomLifecycle() { return DataStreamLifecycle.newBuilder() From e4f0b193c6e56373be027ede79b4eb63c0b0336b Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 29 Apr 2024 16:35:46 +0200 Subject: [PATCH 025/244] Optimise BinaryRangeAggregator for single value fields (#108016) this commit optimise range aggregations over IP for single value fields. 
--- docs/changelog/108016.yaml | 5 ++ .../bucket/range/BinaryRangeAggregator.java | 73 +++++++++++++------ 2 files changed, 56 insertions(+), 22 deletions(-) create mode 100644 docs/changelog/108016.yaml diff --git a/docs/changelog/108016.yaml b/docs/changelog/108016.yaml new file mode 100644 index 0000000000000..0aa3f86a6f859 --- /dev/null +++ b/docs/changelog/108016.yaml @@ -0,0 +1,5 @@ +pr: 108016 +summary: Optimise `BinaryRangeAggregator` for single value fields +area: Aggregations +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java index 51901b422c861..1a793ecd80b11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/BinaryRangeAggregator.java @@ -7,9 +7,13 @@ */ package org.elasticsearch.search.aggregations.bucket.range; +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionContext; @@ -130,9 +134,9 @@ protected void doCollect(LeafBucketCollector sub, int doc, long bucket) throws I abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBase { - final long[] froms, tos, maxTos; - final SortedSetDocValues values; - final LeafBucketCollector sub; + private final long[] froms, tos, maxTos; + private final DocCollector collector; + private final LeafBucketCollector sub; 
SortedSetRangeLeafCollector(SortedSetDocValues values, Range[] ranges, LeafBucketCollector sub) throws IOException { super(sub, values); @@ -141,7 +145,23 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final SortedDocValues singleton = DocValues.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.ordValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + int lo = 0; + for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { + lo = collect(doc, ord, bucket, lo); + } + } + }; + } this.sub = sub; froms = new long[ranges.length]; tos = new long[ranges.length]; // inclusive @@ -174,12 +194,7 @@ abstract static class SortedSetRangeLeafCollector extends LeafBucketCollectorBas @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - int lo = 0; - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { - lo = collect(doc, ord, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, long ord, long bucket, int lowBound) throws IOException { @@ -236,10 +251,10 @@ private int collect(int doc, long ord, long bucket, int lowBound) throws IOExcep abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollectorBase { - final Range[] ranges; - final BytesRef[] maxTos; - final SortedBinaryDocValues values; - final LeafBucketCollector sub; + private final Range[] ranges; + private final BytesRef[] maxTos; + private final DocCollector collector; + private final LeafBucketCollector sub; SortedBinaryRangeLeafCollector(SortedBinaryDocValues values, Range[] ranges, LeafBucketCollector sub) { super(sub, values); @@ -248,7 
+263,22 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector throw new IllegalArgumentException("Ranges must be sorted"); } } - this.values = values; + final BinaryDocValues singleton = FieldData.unwrapSingleton(values); + if (singleton != null) { + this.collector = (doc, bucket) -> { + if (singleton.advanceExact(doc)) { + collect(doc, singleton.binaryValue(), bucket, 0); + } + }; + } else { + this.collector = (doc, bucket) -> { + if (values.advanceExact(doc)) { + for (int i = 0, lo = 0; i < values.docValueCount(); ++i) { + lo = collect(doc, values.nextValue(), bucket, lo); + } + } + }; + } this.sub = sub; this.ranges = ranges; maxTos = new BytesRef[ranges.length]; @@ -266,13 +296,7 @@ abstract static class SortedBinaryRangeLeafCollector extends LeafBucketCollector @Override public void collect(int doc, long bucket) throws IOException { - if (values.advanceExact(doc)) { - final int valuesCount = values.docValueCount(); - for (int i = 0, lo = 0; i < valuesCount; ++i) { - final BytesRef value = values.nextValue(); - lo = collect(doc, value, bucket, lo); - } - } + collector.collect(doc, bucket); } private int collect(int doc, BytesRef value, long bucket, int lowBound) throws IOException { @@ -327,6 +351,11 @@ private int collect(int doc, BytesRef value, long bucket, int lowBound) throws I protected abstract void doCollect(LeafBucketCollector sub, int doc, long bucket) throws IOException; } + @FunctionalInterface + private interface DocCollector { + void collect(int doc, long bucket) throws IOException; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return buildAggregationsForFixedBucketCount( From ee262954eedd3db27f75f5bb450b3422f965e1f8 Mon Sep 17 00:00:00 2001 From: eyalkoren <41850454+eyalkoren@users.noreply.github.com> Date: Mon, 29 Apr 2024 17:41:34 +0300 Subject: [PATCH 026/244] Adding aggregations support for the `_ignored` field (#101373) Enables aggregations on the 
_ignored metadata field replacing the stored field with doc values. --- docs/changelog/101373.yaml | 6 + .../mapping/fields/ignored-field.asciidoc | 17 + docs/reference/search/profile.asciidoc | 4 +- .../aggregations/ignored_metadata_field.yml | 302 ++++++++++++++++++ .../datastreams/LogsDataStreamIT.java | 4 +- .../rest-api-spec/test/30_inner_hits.yml | 4 +- .../ICUCollationKeywordFieldMapperTests.java | 4 +- .../IgnoredMetaFieldRollingUpgradeIT.java | 210 ++++++++++++ .../rest-api-spec/test/search/370_profile.yml | 6 +- .../index/mapper/IgnoredMetadataFieldIT.java | 164 ++++++++++ .../elasticsearch/index/IndexVersions.java | 1 + .../index/get/ShardGetService.java | 42 +++ .../index/mapper/IgnoredFieldMapper.java | 72 ++++- .../mapper/CompletionFieldMapperTests.java | 4 +- .../index/mapper/IgnoredFieldMapperTests.java | 43 ++- .../index/mapper/IgnoredFieldTypeTests.java | 8 + .../index/mapper/KeywordFieldMapperTests.java | 4 +- .../index/mapper/MapperScriptTestCase.java | 2 +- .../mapper/WildcardFieldMapperTests.java | 4 +- 19 files changed, 867 insertions(+), 34 deletions(-) create mode 100644 docs/changelog/101373.yaml create mode 100644 modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml create mode 100644 qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java diff --git a/docs/changelog/101373.yaml b/docs/changelog/101373.yaml new file mode 100644 index 0000000000000..53b5680301c79 --- /dev/null +++ b/docs/changelog/101373.yaml @@ -0,0 +1,6 @@ +pr: 101373 +summary: Adding aggregations support for the `_ignored` field +area: Search +type: feature +issues: + - 59946 diff --git a/docs/reference/mapping/fields/ignored-field.asciidoc b/docs/reference/mapping/fields/ignored-field.asciidoc index 5fd6c478438ab..48f8626c5ab0b 100644 --- 
a/docs/reference/mapping/fields/ignored-field.asciidoc +++ b/docs/reference/mapping/fields/ignored-field.asciidoc @@ -43,3 +43,20 @@ GET _search } } -------------------------------------------------- + +Since 8.15.0, the `_ignored` field supports aggregations as well. +For example, the below query finds all fields that got ignored: + +[source,console] +-------------------------------------------------- +GET _search +{ + "aggs": { + "ignored_fields": { + "terms": { + "field": "_ignored" + } + } + } +} +-------------------------------------------------- diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index 48c65ed0abc7b..3fed14231808c 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -194,7 +194,7 @@ The API returns the following result: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { @@ -1051,7 +1051,7 @@ And here is the fetch profile: "load_source_count": 5 }, "debug": { - "stored_fields": ["_id", "_ignored", "_routing", "_source"] + "stored_fields": ["_id", "_routing", "_source"] }, "children": [ { diff --git a/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml new file mode 100644 index 0000000000000..fd15d24a5f3ca --- /dev/null +++ b/modules/aggregations/src/yamlRestTest/resources/rest-api-spec/test/aggregations/ignored_metadata_field.yml @@ -0,0 +1,302 @@ +setup: + - do: + indices.create: + index: test + body: + mappings: + properties: + city: + type: keyword + ignore_above: 10 + email: + type: keyword + ignore_above: 20 + date_of_birth: + type: date + format: "dd-MM-yyyy" + ignore_malformed: true + newsletter: + type: boolean + ignore_malformed: true + ip_address: + type: ip + 
ignore_malformed: true + products: + type: keyword + ignore_above: 12 + total_price: + type: double + ignore_malformed: true + location: + type: geo_point + ignore_malformed: true + order_datetime: + type: date + format: "yyyy-MM-dd HH:mm:ss" + ignore_malformed: true + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { "_id": "001" } } + - { "city": "Milano", email: "alice@gmail.com", date_of_birth: "12-03-1990", newsletter: true, ip_address: "130.34.45.202", products: ["it-002-4567", "it-001-6679"], total_price: "57.99", location: [45.46, 9.16], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "002" } } + - { "city": "Roma", email: "bob@gmail.com", date_of_birth: "15-05-1991", newsletter: false, ip_address: "2001:0db8:85a3:0000:0000:8a2e:0370:7334", products: [ "it-002-112467", "it-002-5579" ], total_price: "10.99", location: [ -44.78, 19.20 ], order_datetime: "2021-05-01 20:01:37" } + - { "index": { "_id": "003" } } + - { "city": "Venezia", email: "alice@gmail.com", date_of_birth: "01-09-1994", newsletter: false, ip_address: "fe80::1", products: [ "it-002", "it-003-17171717" ], total_price: "-12.99", location: [ 182.22, "20.12" ], order_datetime: "2021-05-02" } + - { "index": { "_id": "004" } } + - { "city": "Cortina d'Ampezzo", email: "a-very-long-email-address-that-should-be-ignored@gmail.com", date_of_birth: "05-06-1989", newsletter: t, ip_address: "::1", products: [ "it101020203030", "it" ], total_price: "57", location: [ 0, 9.16 ], order_datetime: "2021-05-01-20:01:37" } + - { "index": { "_id": "005" } } + - { "city": "Cortina d'Ampezzo", email: "dave@gmail.com", date_of_birth: "12-03-1990 12:30:45", newsletter: t, ip_address: "130.999.36.201", products: [ "it-002-2213", "it-001-7709" ], total_price: "twentytree/12", location: [ "45.33, 8.20" ], order_datetime: "20210501 20:01:37" } + - { "index": { "_id": "006" } } + - { "city": "Milano", email: "eric@gmail.com", date_of_birth: "19-12-90", newsletter: f, ip_address: 
"130.34.45", products: [ "it-002-555", "it-001-5589990000" ], total_price: "", location: [ "45.99", "9.16" ], order_datetime: "2021-05-01 20:01:37.123" } + - { "index": { "_id": "007" } } + - { "city": "Venezia", email: "luke-skywalker@gmail.com", date_of_birth: "20/03/1992", newsletter: f, ip_address: "130..45.202", products: [ "it-002-1234", "it-001-1213" ], total_price: "57.99.12", location: [ 45, 20 ], order_datetime: "2021-05-03 19:38:22" } + - { "index": { "_id": "008" } } + - { "city": "Firenze", email: "bob@gmail.com", date_of_birth: "02311988", newsletter: "", ip_address: ":::1", products: ["", ""], total_price: "0.0", location: [ 46.22, 11.22 ], order_datetime: "2021-05-03 20:01" } + - { "index": { "_id": "009" } } + - { "city": "Firenze", email: "tom@gmail.com", date_of_birth: "16-11-1990", newsletter: "not_sure", ip_address: "2001:0db8::1234:5678::", products: "it-002-4567", total_price: "0,99", location: [ 18.18, 19.19 ], order_datetime: "2021-05-03 20-01-55" } + - { "index": { "_id": "010" } } + - { "city": "Cortina d'Ampezzo", email: "alice@gmail.com", date_of_birth: "18-12-1992", newsletter: "false", ip_address: ":::1", products: "it-002-1890994567", total_price: "14,27", location: [ 45.46-9.16 ], order_datetime: "2021-05-01 20:05:37" } + - { "index": { "_id": "011" } } + - { "city": "Roma", email: "paul@gmail.com", date_of_birth: "17.15.1990", newsletter: "true", ip_address: "", products: [ "it-002-1019", "it-001-5578", "it-009-9901256" ], total_price: "49.99", location: 45.22, order_datetime: "2021-05-01T20:02:00" } + +--- +"terms aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 9 } + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { 
aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.3.key: "date_of_birth" } + - match: { aggregations.ignored_terms.buckets.3.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.4.key: "newsletter" } + - match: { aggregations.ignored_terms.buckets.4.doc_count: 5 } + - match: { aggregations.ignored_terms.buckets.5.key: "total_price" } + - match: { aggregations.ignored_terms.buckets.5.doc_count: 4 } + - match: { aggregations.ignored_terms.buckets.6.key: "city" } + - match: { aggregations.ignored_terms.buckets.6.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.7.key: "location" } + - match: { aggregations.ignored_terms.buckets.7.doc_count: 3 } + - match: { aggregations.ignored_terms.buckets.8.key: "email" } + - match: { aggregations.ignored_terms.buckets.8.doc_count: 2 } + +--- +"terms aggregation on _ignored metadata field with top hits": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_terms: + terms: + field: _ignored + size: 3 + aggs: + top_by_datetime: + top_hits: + sort: + - order_datetime: { order: desc } + size: 1 + + - match: { hits.total.value: 11 } + - length: { aggregations.ignored_terms.buckets: 3 } + + - match: { aggregations.ignored_terms.buckets.0.key: "ip_address" } + - match: { aggregations.ignored_terms.buckets.0.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.0.top_by_datetime.hits.hits.0._ignored: ["date_of_birth", "email", "ip_address", "newsletter", "total_price"]} + + - match: { aggregations.ignored_terms.buckets.1.key: "order_datetime" } + - match: { 
aggregations.ignored_terms.buckets.1.doc_count: 7 } + - match: { aggregations.ignored_terms.buckets.1.top_by_datetime.hits.hits.0._ignored: ["order_datetime", "products"]} + + - match: { aggregations.ignored_terms.buckets.2.key: "products" } + - match: { aggregations.ignored_terms.buckets.2.doc_count: 6 } + - match: { aggregations.ignored_terms.buckets.2.top_by_datetime.hits.hits.0._ignored: ["city", "ip_address", "location", "products", "total_price"]} + +--- +"date histogram aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_histo: + date_histogram: + field: order_datetime + calendar_interval: day + aggs: + ignored_terms: + terms: + field: _ignored + size: 2 + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_histo.buckets: 3 } + + - match: { aggregations.order_datetime_histo.buckets.0.key_as_string: "2021-05-01 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.0.doc_count: 3 } + - match: { aggregations.order_datetime_histo.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + + - match: { aggregations.order_datetime_histo.buckets.1.key_as_string: "2021-05-02 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.1.doc_count: 0 } + - length: { aggregations.order_datetime_histo.buckets.1.ignored_terms.buckets: 0 } + + - match: { aggregations.order_datetime_histo.buckets.2.key_as_string: "2021-05-03 00:00:00" } + - match: { aggregations.order_datetime_histo.buckets.2.doc_count: 1 } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_histo.buckets.2.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + +--- +"cardinality aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: 
"_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_cardinality: + cardinality: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_cardinality.value: 9 } + +--- +"value count aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + ignored_value_count: + value_count: + field: _ignored + + - match: { hits.total.value: 11 } + - match: {aggregations.ignored_value_count.value: 42 } + +--- +"date range aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + order_datetime_range: + date_range: + field: order_datetime + format: "dd-MM-yyyy" + ranges: + - to: "03-05-2021" + - from: "02-05-2021" + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.order_datetime_range.buckets: 2 } + + - match: { aggregations.order_datetime_range.buckets.0.to_as_string: "03-05-2021" } + - match: { aggregations.order_datetime_range.buckets.0.doc_count: 3 } + - length: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.0: { key: "products", doc_count: 2 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.1: { key: "city", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.3: { key: "location", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.0.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + + - match: { 
aggregations.order_datetime_range.buckets.1.from_as_string: "02-05-2021" } + - match: { aggregations.order_datetime_range.buckets.1.doc_count: 1 } + - length: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets: 5 } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.0: { key: "date_of_birth", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.1: { key: "email", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.2: { key: "ip_address", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.3: { key: "newsletter", doc_count: 1 } } + - match: { aggregations.order_datetime_range.buckets.1.ignored_terms.buckets.4: { key: "total_price", doc_count: 1 } } + +--- +"random sampler aggregation with terms on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + - do: + search: + body: + size: 0 + aggs: + sample: + random_sampler: + probability: 1.0 # make sure buckets count is consistent + seed: 43 + aggs: + ignored_terms: + terms: + field: _ignored + + - match: { hits.total.value: 11 } + - length: { aggregations.sample.ignored_terms.buckets: 9 } + - match: { aggregations.sample.ignored_terms.buckets.0: { key: "ip_address", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.1: { key: "order_datetime", doc_count: 7 } } + - match: { aggregations.sample.ignored_terms.buckets.2: { key: "products", doc_count: 6 } } + - match: { aggregations.sample.ignored_terms.buckets.3: { key: "date_of_birth", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.4: { key: "newsletter", doc_count: 5 } } + - match: { aggregations.sample.ignored_terms.buckets.5: { key: "total_price", doc_count: 4 } } + - match: { aggregations.sample.ignored_terms.buckets.6: { key: "city", doc_count: 3 } } + - match: { 
aggregations.sample.ignored_terms.buckets.7: { key: "location", doc_count: 3 } } + - match: { aggregations.sample.ignored_terms.buckets.8: { key: "email", doc_count: 2 } } + +--- +"filter aggregation on _ignored metadata field": + - skip: + version: " - 8.14.99" + reason: "_ignored metadata field aggregation support added in 8.15" + features: close_to + - do: + search: + body: + size: 0 + aggs: + total: + sum: + field: total_price + filter_ignored: + filter: + term: + _ignored: "email" + + - match: { hits.total.value: 11 } + - close_to: { aggregations.total.value: { value: 162.98, error: 0.01 } } + - match: { aggregations.filter_ignored.doc_count: 2 } diff --git a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java index eed06c5c69332..2370cca08b23e 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/LogsDataStreamIT.java @@ -19,6 +19,7 @@ import java.util.Map; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -550,9 +551,8 @@ public void testNoSubobjects() throws Exception { // "start-timestamp" doesn't match the ECS dynamic mapping pattern "*_timestamp" assertThat(fields.get("test.start-timestamp"), is(List.of("not a date"))); assertThat(ignored.size(), is(2)); - assertThat(ignored.get(0), is("vulnerability.textual_score")); + assertThat(ignored, containsInAnyOrder("test.start_timestamp", "vulnerability.textual_score")); // the ECS date dynamic template enforces mapping of "*_timestamp" fields to a date type - assertThat(ignored.get(1), is("test.start_timestamp")); 
assertThat(ignoredFieldValues.get("test.start_timestamp").size(), is(1)); assertThat(ignoredFieldValues.get("test.start_timestamp"), is(List.of("not a date"))); assertThat(ignoredFieldValues.get("vulnerability.textual_score").size(), is(1)); diff --git a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml index eff9a9beb35bc..40d646cc645f5 100644 --- a/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml +++ b/modules/parent-join/src/yamlRestTest/resources/rest-api-spec/test/30_inner_hits.yml @@ -120,7 +120,7 @@ teardown: --- profile fetch: - skip: - version: ' - 8.13.99' + version: ' - 8.14.99' reason: fetch fields and stored_fields using ValueFetcher - do: @@ -140,7 +140,7 @@ profile fetch: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java index 3a48a6bcce4e0..99017733dd989 100644 --- a/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/plugin/analysis/icu/ICUCollationKeywordFieldMapperTests.java @@ -281,8 +281,8 @@ public void testIgnoreAbove() throws 
IOException { fields = doc.rootDoc().getFields("field"); assertThat(fields, empty()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testUpdateIgnoreAbove() throws IOException { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java new file mode 100644 index 0000000000000..874fac615b9b1 --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IgnoredMetaFieldRollingUpgradeIT.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import com.carrotsearch.randomizedtesting.annotations.Name; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +public class IgnoredMetaFieldRollingUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + private static final String TERMS_AGG_QUERY = Strings.format(""" + { + "aggs": { + "ignored_terms": { + "terms": { + "field": "_ignored" + } + } + } + }"""); + + public IgnoredMetaFieldRollingUpgradeIT(@Name("upgradedNodes") int upgradedNodes) { + super(upgradedNodes); + } + + public void testAggregation() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("index-old-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-old-agg", "foofoo", "1024.12.321.777", "1")), RestStatus.CREATED); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + assertTermsAggIgnoredMetadataField("index-old-agg"); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus(client().performRequest(createNewIndex("index-new-agg")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("index-new-agg", "barbar", "555.222.111.000", "2")), 
RestStatus.CREATED); + + assertTermsAggIgnoredMetadataField("index-*"); + if (getOldClusterIndexVersion().before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertTermsAggIgnoredMetadataFieldException( + "index-old-agg", + "Fielddata is not supported on field [_ignored] of type [_ignored]" + ); + } else { + assertTermsAggIgnoredMetadataField("index-old-agg"); + } + assertTermsAggIgnoredMetadataField("index-new-agg"); + } + } + + public void testIgnoredMetaFieldGetWithIgnoredQuery() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("old-get-ignored-index")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "foofoo", "192.168.10.1234", "1")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "1")); + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + assertRestStatus( + client().performRequest(indexDocument("old-get-ignored-index", "barbar", "192.168.256.256", "2")), + RestStatus.CREATED + ); + final Map doc = entityAsMap(getWithIgnored("old-get-ignored-index", "2")); + // NOTE: here we are reading documents from an index created by an older version of Elasticsearch where the _ignored + // field could be stored depending on the version of Elasticsearch which created the index. The mapper for the _ignored field + // will keep the stored field if necessary to avoid mixing documents where the _ignored field is stored and documents where it + // is not, in the same index. + assertThat(((List) doc.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + + // NOTE: The stored field is dropped only once a new index is created by a new version of Elasticsearch. 
+ final String newVersionIndexName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument(newVersionIndexName, "foobar", "192.168.777", "3")), RestStatus.CREATED); + final Map docFromNewIndex = entityAsMap(getWithIgnored(newVersionIndexName, "3")); + assertThat(((List) docFromNewIndex.get(IgnoredFieldMapper.NAME)), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + } + + public void testIgnoredMetaFieldGetWithoutIgnoredQuery() throws IOException { + if (isOldCluster()) { + assertRestStatus(client().performRequest(createNewIndex("old-get-index")), RestStatus.OK); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "foofoo", "192.168.169.300", "1")), RestStatus.CREATED); + final Map doc = entityAsMap(get("old-get-index", "1")); + if (getOldClusterIndexVersion().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + assertNull(doc.get(IgnoredFieldMapper.NAME)); + } + } else if (isUpgradedCluster()) { + assertRestStatus(client().performRequest(waitForClusterStatus("green", "90s")), RestStatus.OK); + final Map doc1 = entityAsMap(get("old-get-index", "1")); + assertNull(doc1.get(IgnoredFieldMapper.NAME)); + assertRestStatus(client().performRequest(indexDocument("old-get-index", "barbar", "192.168.0.1234", "2")), RestStatus.CREATED); + final Map doc2 = entityAsMap(get("old-get-index", "2")); + assertNull(doc2.get(IgnoredFieldMapper.NAME)); + + final String newVersionIndexName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); + assertRestStatus(client().performRequest(createNewIndex(newVersionIndexName)), RestStatus.OK); + // NOTE: new Elasticsearch version does not used stored field for _ignored due to writing an index created by the new version + assertRestStatus( + client().performRequest(indexDocument(newVersionIndexName, "foobar", "263.192.168.12", "3")), + RestStatus.CREATED + 
); + final Map docFromNewIndex = entityAsMap(get(newVersionIndexName, "3")); + assertNull(docFromNewIndex.get(IgnoredFieldMapper.NAME)); + } + } + + private static Response getWithIgnored(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId + "?stored_fields=_ignored")); + } + + private static Response get(final String index, final String docId) throws IOException { + return client().performRequest(new Request("GET", "/" + index + "/_doc/" + docId)); + } + + private static Request waitForClusterStatus(final String statusColor, final String timeoutSeconds) { + final Request waitForGreen = new Request("GET", "/_cluster/health"); + waitForGreen.addParameter("wait_for_status", statusColor); + waitForGreen.addParameter("timeout", timeoutSeconds); + waitForGreen.addParameter("level", "shards"); + return waitForGreen; + } + + private static void assertRestStatus(final Response indexDocumentResponse, final RestStatus restStatus) { + assertThat(indexDocumentResponse.getStatusLine().getStatusCode(), Matchers.equalTo(restStatus.getStatus())); + } + + private static Request createNewIndex(final String indexName) throws IOException { + final Request createIndex = new Request("PUT", "/" + indexName); + final XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("mappings") + .startObject("properties") + .startObject("keyword") + .field("type", "keyword") + .field("ignore_above", 3) + .endObject() + .startObject("ip_address") + .field("type", "ip") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndex.setJsonEntity(Strings.toString(mappings)); + return createIndex; + } + + private static Request indexDocument(final String indexName, final String keywordValue, final String ipAddressValue, final String docId) + throws IOException { + final Request indexRequest = new 
Request("POST", "/" + indexName + "/_doc/" + docId); + final XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .field("keyword", keywordValue) + .field("ip_address", ipAddressValue) + .endObject(); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity(Strings.toString(doc)); + return indexRequest; + } + + @SuppressWarnings("unchecked") + private static void assertTermsAggIgnoredMetadataField(final String indexPattern) throws IOException { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Response aggResponse = client().performRequest(aggRequest); + final Map aggResponseEntityAsMap = entityAsMap(aggResponse); + final Map aggregations = (Map) aggResponseEntityAsMap.get("aggregations"); + final Map ignoredTerms = (Map) aggregations.get("ignored_terms"); + final List> buckets = (List>) ignoredTerms.get("buckets"); + assertThat(buckets.stream().map(bucket -> bucket.get("key")).toList(), Matchers.containsInAnyOrder("ip_address", "keyword")); + } + + private static void assertTermsAggIgnoredMetadataFieldException(final String indexPattern, final String exceptionMessage) { + final Request aggRequest = new Request("POST", "/" + indexPattern + "/_search"); + aggRequest.addParameter("size", "0"); + aggRequest.setJsonEntity(TERMS_AGG_QUERY); + final Exception responseException = assertThrows(ResponseException.class, () -> client().performRequest(aggRequest)); + assertThat(responseException.getMessage(), Matchers.containsString(exceptionMessage)); + } + +} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml index 817c62dbdd12d..7625f19557e9b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -41,7 +41,7 @@ fetch fields: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 2 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } @@ -74,7 +74,7 @@ fetch source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 3 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } @@ -139,7 +139,7 @@ fetch nested source: - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } - - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _ignored, _routing, _source] } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } - length: { profile.shards.0.fetch.children: 4 } - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } - match: { profile.shards.0.fetch.children.1.type: FetchSourcePhase } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java new file mode 100644 index 0000000000000..cfe5a2b69c6da --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/IgnoredMetadataFieldIT.java @@ -0,0 +1,164 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.junit.Before; + +import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.elasticsearch.search.aggregations.AggregationBuilders.avg; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.hasSize; + +@SuppressWarnings("resource") +public class IgnoredMetadataFieldIT extends ESSingleNodeTestCase { + + public static final String 
NUMERIC_FIELD_NAME = "numeric_field"; + public static final String DATE_FIELD_NAME = "date_field"; + public static final String TEST_INDEX = "test-index"; + public static final String CORRECT_FIELD_TYPE_DOC_ID = "1"; + public static final String WRONG_FIELD_TYPE_DOC_ID = "2"; + + @Before + public void createTestIndex() throws Exception { + CreateIndexResponse createIndexResponse = null; + try { + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject(NUMERIC_FIELD_NAME) + .field("type", "long") + .field("ignore_malformed", true) + .endObject() + .startObject(DATE_FIELD_NAME) + .field("type", "date") + .field("ignore_malformed", true) + .endObject() + .endObject() + .endObject() + .endObject(); + createIndexResponse = indicesAdmin().prepareCreate(TEST_INDEX).setMapping(mapping).get(); + assertAcked(createIndexResponse); + indexTestDoc(NUMERIC_FIELD_NAME, CORRECT_FIELD_TYPE_DOC_ID, "42"); + indexTestDoc(NUMERIC_FIELD_NAME, WRONG_FIELD_TYPE_DOC_ID, "forty-two"); + } finally { + if (createIndexResponse != null) { + createIndexResponse.decRef(); + } + } + } + + public void testIgnoredMetadataFieldFetch() { + SearchResponse searchResponse1 = null; + SearchResponse searchResponse2 = null; + try { + searchResponse1 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(CORRECT_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse1, 1); + SearchHit hit = searchResponse1.getHits().getAt(0); + DocumentField numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals(42, (long) numericField.getValue()); + DocumentField ignoredField = hit.field(IgnoredFieldMapper.NAME); + assertNull(ignoredField); + + searchResponse2 = client().prepareSearch() + .setQuery(new IdsQueryBuilder().addIds(WRONG_FIELD_TYPE_DOC_ID)) + .addFetchField(NUMERIC_FIELD_NAME) + .get(); + assertHitCount(searchResponse2, 1); + hit = 
searchResponse2.getHits().getAt(0); + numericField = hit.field(NUMERIC_FIELD_NAME); + assertNotNull(numericField); + assertEquals("forty-two", numericField.getIgnoredValues().get(0)); + ignoredField = hit.field(IgnoredFieldMapper.NAME); + assertNotNull(ignoredField); + assertEquals(NUMERIC_FIELD_NAME, ignoredField.getValue()); + } finally { + if (searchResponse1 != null) { + searchResponse1.decRef(); + } + if (searchResponse2 != null) { + searchResponse2.decRef(); + } + } + } + + public void testIgnoredMetadataFieldAggregation() { + SearchResponse avgSearch = null; + SearchResponse termsSearch = null; + try { + indexTestDoc(NUMERIC_FIELD_NAME, "correct-44", "44"); + avgSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(avg("numeric-field-aggs").field(NUMERIC_FIELD_NAME)) + .get(); + assertTrue(avgSearch.hasAggregations()); + InternalAvg avg = avgSearch.getAggregations().get("numeric-field-aggs"); + assertNotNull(avg); + assertEquals(43.0, avg.getValue(), 0.0); + + indexTestDoc(NUMERIC_FIELD_NAME, "wrong-44", "forty-four"); + indexTestDoc(DATE_FIELD_NAME, "wrong-date", "today"); + termsSearch = client().prepareSearch(TEST_INDEX) + .setSize(0) + .addAggregation(terms("ignored-field-aggs").field(IgnoredFieldMapper.NAME)) + .get(); + assertTrue(termsSearch.hasAggregations()); + StringTerms terms = termsSearch.getAggregations().get("ignored-field-aggs"); + assertNotNull(terms); + assertThat(terms.getBuckets(), hasSize(2)); + StringTerms.Bucket numericFieldBucket = terms.getBucketByKey(NUMERIC_FIELD_NAME); + assertEquals(NUMERIC_FIELD_NAME, numericFieldBucket.getKeyAsString()); + assertEquals(2, numericFieldBucket.getDocCount()); + StringTerms.Bucket dateFieldBucket = terms.getBucketByKey(DATE_FIELD_NAME); + assertEquals(DATE_FIELD_NAME, dateFieldBucket.getKeyAsString()); + assertEquals(1, dateFieldBucket.getDocCount()); + } finally { + if (avgSearch != null) { + avgSearch.decRef(); + } + if (termsSearch != null) { + termsSearch.decRef(); + } + } 
+ } + + private void indexTestDoc(String testField, String docId, String testValue) { + DocWriteResponse docWriteResponse = null; + try { + docWriteResponse = client().prepareIndex(TEST_INDEX) + .setId(docId) + .setSource(testField, testValue) + .setRefreshPolicy(IMMEDIATE) + .get(); + assertEquals(RestStatus.CREATED, docWriteResponse.status()); + } finally { + if (docWriteResponse != null) { + docWriteResponse.decRef(); + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index 6edd43683519e..78f07c8a137b9 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -104,6 +104,7 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion UPGRADE_TO_LUCENE_9_10 = def(8_503_00_0, Version.LUCENE_9_10_0); public static final IndexVersion TIME_SERIES_ROUTING_HASH_IN_ID = def(8_504_00_0, Version.LUCENE_9_10_0); public static final IndexVersion DEFAULT_DENSE_VECTOR_TO_INT8_HNSW = def(8_505_00_0, Version.LUCENE_9_10_0); + public static final IndexVersion DOC_VALUES_FOR_IGNORED_META_FIELD = def(8_505_00_1, Version.LUCENE_9_10_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 3758858a5b10a..3e191d0ab1e25 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -8,6 +8,7 @@ package org.elasticsearch.index.get; +import org.apache.lucene.index.SortedSetDocValues; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -17,10 +18,13 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; +import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; @@ -35,6 +39,8 @@ import org.elasticsearch.search.lookup.Source; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -308,6 +314,7 @@ private GetResult innerGetFetch( } // put stored fields into result objects + final IndexVersion indexVersion = indexSettings.getIndexVersionCreated(); if (leafStoredFieldLoader.storedFields().isEmpty() == false) { Set needed = new HashSet<>(); if (storedFields != null) { @@ -320,6 +327,10 @@ private GetResult innerGetFetch( if (false == needed.contains(entry.getKey())) { continue; } + if (IgnoredFieldMapper.NAME.equals(entry.getKey()) 
+ && indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + continue; + } MappedFieldType ft = mapperService.fieldType(entry.getKey()); if (ft == null) { continue; // user asked for a non-existent field, ignore it @@ -333,6 +344,21 @@ private GetResult innerGetFetch( } } + // NOTE: when _ignored is requested via `stored_fields` we need to load it from doc values instead of loading it from stored fields. + // The _ignored field used to be stored, but as a result of supporting aggregations on it, it moved from using a stored field to + // using doc values. + if (indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) + && storedFields != null + && Arrays.asList(storedFields).contains(IgnoredFieldMapper.NAME)) { + final DocumentField ignoredDocumentField = loadIgnoredMetadataField(docIdAndVersion); + if (ignoredDocumentField != null) { + if (metadataFields == null) { + metadataFields = new HashMap<>(); + } + metadataFields.put(IgnoredFieldMapper.NAME, ignoredDocumentField); + } + } + BytesReference sourceBytes = null; if (mapperService.mappingLookup().isSourceEnabled() && fetchSourceContext.fetchSource()) { Source source = loader.leaf(docIdAndVersion.reader, new int[] { docIdAndVersion.docId }) @@ -357,6 +383,22 @@ private GetResult innerGetFetch( ); } + private static DocumentField loadIgnoredMetadataField(final DocIdAndVersion docIdAndVersion) throws IOException { + final SortedSetDocValues ignoredDocValues = docIdAndVersion.reader.getContext() + .reader() + .getSortedSetDocValues(IgnoredFieldMapper.NAME); + if (ignoredDocValues == null + || ignoredDocValues.advanceExact(docIdAndVersion.docId) == false + || ignoredDocValues.docValueCount() <= 0) { + return null; + } + final List ignoredValues = new ArrayList<>(ignoredDocValues.docValueCount()); + for (int i = 0; i < ignoredDocValues.docValueCount(); i++) { + ignoredValues.add(ignoredDocValues.lookupOrd(ignoredDocValues.nextOrd()).utf8ToString()); + } + return new 
DocumentField(IgnoredFieldMapper.NAME, ignoredValues); + } + private static StoredFieldLoader buildStoredFieldLoader(String[] fields, FetchSourceContext fetchSourceContext, SourceLoader loader) { Set fieldsToLoad = new HashSet<>(); if (fields != null && fields.length > 0) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java index 4347bcfd8be3b..7da7992f9a9ca 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredFieldMapper.java @@ -9,10 +9,21 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.document.StringField; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.fielddata.FieldData; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.script.field.KeywordDocValuesField; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import java.util.Collections; @@ -30,14 +41,20 @@ public static class Defaults { } public static final IgnoredFieldType FIELD_TYPE = new IgnoredFieldType(); + private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(FIELD_TYPE); - private static final IgnoredFieldMapper INSTANCE = new IgnoredFieldMapper(); + public static final LegacyIgnoredFieldType LEGACY_FIELD_TYPE = new LegacyIgnoredFieldType(); + 
private static final IgnoredFieldMapper LEGACY_INSTANCE = new IgnoredFieldMapper(LEGACY_FIELD_TYPE); - public static final TypeParser PARSER = new FixedTypeParser(c -> INSTANCE); + public static final TypeParser PARSER = new FixedTypeParser(c -> getInstance(c.indexVersionCreated())); - public static final class IgnoredFieldType extends StringFieldType { + private static MetadataFieldMapper getInstance(IndexVersion indexVersion) { + return indexVersion.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD) ? INSTANCE : LEGACY_INSTANCE; + } - private IgnoredFieldType() { + public static final class LegacyIgnoredFieldType extends StringFieldType { + + private LegacyIgnoredFieldType() { super(NAME, true, true, false, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); } @@ -46,6 +63,11 @@ public String typeName() { return CONTENT_TYPE; } + @Override + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return new StoredValueFetcher(context.lookup(), NAME); + } + @Override public Query existsQuery(SearchExecutionContext context) { // This query is not performance sensitive, it only helps assess @@ -54,21 +76,53 @@ public Query existsQuery(SearchExecutionContext context) { // field is bounded by the number of fields in the mappings. 
return new TermRangeQuery(name(), null, null, true, true); } + } + + public static final class IgnoredFieldType extends StringFieldType { + + private IgnoredFieldType() { + super(NAME, true, false, true, TextSearchInfo.SIMPLE_MATCH_ONLY, Collections.emptyMap()); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } @Override public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - return new StoredValueFetcher(context.lookup(), NAME); + return new DocValueFetcher(docValueFormat(format, null), context.getForField(this, FielddataOperation.SEARCH)); + } + + public Query existsQuery(SearchExecutionContext context) { + return new FieldExistsQuery(name()); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + CoreValuesSourceType.KEYWORD, + (dv, n) -> new KeywordDocValuesField(FieldData.toString(dv), n) + ); } } - private IgnoredFieldMapper() { - super(FIELD_TYPE); + private IgnoredFieldMapper(StringFieldType fieldType) { + super(fieldType); } @Override public void postParse(DocumentParserContext context) { - for (String field : context.getIgnoredFields()) { - context.doc().add(new StringField(NAME, field, Field.Store.YES)); + if (context.indexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD)) { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new SortedSetDocValuesField(NAME, new BytesRef(ignoredField))); + context.doc().add(new StringField(NAME, ignoredField, Field.Store.NO)); + } + } else { + for (String ignoredField : context.getIgnoredFields()) { + context.doc().add(new StringField(NAME, ignoredField, Field.Store.YES)); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 
982a7ed6afaa5..f574d95304c0a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -692,8 +692,8 @@ public void testFieldValueValidation() throws Exception { assertThat(doc.docs().size(), equalTo(1)); assertNull(doc.docs().get(0).get("field")); assertNotNull(doc.docs().get(0).getField("_ignored")); - IndexableField ignoredFields = doc.docs().get(0).getField("_ignored"); - assertThat(ignoredFields.stringValue(), equalTo("field")); + List ignoredFields = doc.docs().get(0).getFields("_ignored"); + assertTrue(ignoredFields.stream().anyMatch(field -> "field".equals(field.stringValue()))); // null inputs are ignored ParsedDocument nullDoc = defaultMapper.parse(source(b -> b.nullField("field"))); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java index 7eff2dc73d76f..477f75be4c5a0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldMapperTests.java @@ -8,13 +8,17 @@ package org.elasticsearch.index.mapper; +import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; @@ -22,8 +26,6 @@ import java.util.List; import static 
org.hamcrest.Matchers.containsString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class IgnoredFieldMapperTests extends MetadataMapperTestCase { @@ -54,9 +56,19 @@ public void testDefaults() throws IOException { ); ParsedDocument document = mapper.parse(source(b -> b.field("field", "value"))); List fields = document.rootDoc().getFields(IgnoredFieldMapper.NAME); - assertEquals(1, fields.size()); - assertEquals(IndexOptions.DOCS, fields.get(0).fieldType().indexOptions()); - assertTrue(fields.get(0).fieldType().stored()); + assertEquals(2, fields.size()); + IndexableField stringField = fields.stream() + .filter(field -> DocValuesType.NONE == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.DOCS, stringField.fieldType().indexOptions()); + assertEquals("field", stringField.stringValue()); + assertEquals(DocValuesType.NONE, stringField.fieldType().docValuesType()); + IndexableField docValues = fields.stream() + .filter(field -> DocValuesType.SORTED_SET == field.fieldType().docValuesType()) + .findFirst() + .orElseThrow(); + assertEquals(IndexOptions.NONE, docValues.fieldType().indexOptions()); } public void testFetchIgnoredFieldValue() throws IOException { @@ -65,8 +77,7 @@ public void testFetchIgnoredFieldValue() throws IOException { iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value"))).rootDoc()); }, iw -> { SearchLookup lookup = new SearchLookup(mapperService::fieldType, fieldDataLookup(mapperService), (ctx, doc) -> null); - SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); - when(searchExecutionContext.lookup()).thenReturn(lookup); + SearchExecutionContext searchExecutionContext = createSearchExecutionContext(mapperService); IgnoredFieldMapper.IgnoredFieldType ft = (IgnoredFieldMapper.IgnoredFieldType) mapperService.fieldType("_ignored"); ValueFetcher valueFetcher = ft.valueFetcher(searchExecutionContext, 
null); IndexSearcher searcher = newSearcher(iw); @@ -76,4 +87,22 @@ public void testFetchIgnoredFieldValue() throws IOException { }); } + public void testIgnoredFieldType() throws IOException { + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + IndexVersions.FIRST_DETACHED_INDEX_VERSION, + IndexVersion.current() + ); + boolean afterIntroducingDocValues = version.onOrAfter(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + boolean beforeRemovingStoredField = version.before(IndexVersions.DOC_VALUES_FOR_IGNORED_META_FIELD); + MapperService mapperService = createMapperService(version, fieldMapping(b -> b.field("type", "keyword").field("ignore_above", 3))); + withLuceneIndex(mapperService, iw -> { + iw.addDocument(mapperService.documentMapper().parse(source(b -> b.field("field", "value_to_ignore"))).rootDoc()); + }, iw -> { + MappedFieldType mappedFieldType = mapperService.fieldType(IgnoredFieldMapper.NAME); + assertEquals("version = " + version, afterIntroducingDocValues, mappedFieldType.hasDocValues()); + assertEquals("version = " + version, beforeRemovingStoredField, mappedFieldType.isStored()); + }); + } + } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java index 52475e7b059b5..520fe8e5ac582 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredFieldTypeTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.apache.lucene.index.Term; +import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.RegexpQuery; @@ -60,4 +61,11 @@ public void testWildcardQuery() { ); assertEquals("[wildcard] queries cannot be executed when 'search.allow_expensive_queries' is set to false.", ee.getMessage()); } + 
+ public void testExistsQuery() { + MappedFieldType ft = IgnoredFieldMapper.FIELD_TYPE; + + Query expected = new FieldExistsQuery(IgnoredFieldMapper.NAME); + assertEquals(expected, ft.existsQuery(MOCK_CONTEXT)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java index 4824bd337f5b0..e06ed1736cca2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java @@ -234,8 +234,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testNullValue() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java index 368de3e4d6e58..68d326be0dc83 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperScriptTestCase.java @@ -139,7 +139,7 @@ public final void testOnScriptErrorContinue() throws IOException { ParsedDocument doc = mapper.parse(source(b -> b.field("message", "this is some text"))); assertThat(doc.rootDoc().getFields("message_error"), hasSize(0)); - assertThat(doc.rootDoc().getField("_ignored").stringValue(), equalTo("message_error")); + assertTrue(doc.rootDoc().getFields("_ignored").stream().anyMatch(field -> "message_error".equals(field.stringValue()))); } public final void testRejectScriptErrors() throws IOException { diff --git 
a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java index a17cb7474a681..98f5daec730bb 100644 --- a/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java +++ b/x-pack/plugin/wildcard/src/test/java/org/elasticsearch/xpack/wildcard/mapper/WildcardFieldMapperTests.java @@ -206,8 +206,8 @@ public void testIgnoreAbove() throws IOException { assertEquals(0, fields.size()); fields = doc.rootDoc().getFields("_ignored"); - assertEquals(1, fields.size()); - assertEquals("field", fields.get(0).stringValue()); + assertEquals(2, fields.size()); + assertTrue(fields.stream().anyMatch(field -> "field".equals(field.stringValue()))); } public void testBWCIndexVersion() throws IOException { From 0a9ab8eef2978087d42b3a248e35cb818fbb8dc6 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Mon, 29 Apr 2024 11:14:05 -0400 Subject: [PATCH 027/244] remove ComparisonMapper (#107896) More follow-up work from #105217. Since the binary comparison operators now implement EvaluatorMapper, there is no need for ComparisonMapper going forward. 
--- .../xpack/esql/evaluator/EvalMapper.java | 7 - .../evaluator/mapper/EvaluatorMapper.java | 18 ++ .../operator/comparison/ComparisonMapper.java | 182 ------------------ .../operator/comparison/InMapper.java | 5 +- 4 files changed, 20 insertions(+), 192 deletions(-) delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index c26f722d9f765..096dcc183eaf4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEqualsMapper; import org.elasticsearch.xpack.esql.planner.Layout; @@ -40,12 +39,6 @@ public final class EvalMapper { private static final List> MAPPERS = List.of( - ComparisonMapper.EQUALS, - ComparisonMapper.NOT_EQUALS, - ComparisonMapper.GREATER_THAN, - ComparisonMapper.GREATER_THAN_OR_EQUAL, - ComparisonMapper.LESS_THAN, - ComparisonMapper.LESS_THAN_OR_EQUAL, InMapper.IN_MAPPER, new InsensitiveEqualsMapper(), new BooleanLogic(), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java index e536547e006fd..7a084649ac4fa 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/mapper/EvaluatorMapper.java @@ -15,6 +15,7 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.ql.expression.Expression; import java.util.function.Function; @@ -27,6 +28,22 @@ */ public interface EvaluatorMapper { /** + *

+ * Note for implementors: + * If you are implementing this function, you should call the passed-in + * lambda on your children, after doing any other manipulation (casting, + * etc.) necessary. + *

+ *

+ * Note for Callers: + * If you are attempting to call this method, and you have an + * {@link Expression} and a {@link org.elasticsearch.xpack.esql.planner.Layout}, + * you likely want to call {@link org.elasticsearch.xpack.esql.evaluator.EvalMapper#toEvaluator(Expression, Layout)} + * instead. On the other hand, if you already have something that + * looks like the parameter for this method, you should call this method + * with that function. + *

+ *

* Build an {@link ExpressionEvaluator.Factory} for the tree of * expressions rooted at this node. This is only guaranteed to return * a sensible evaluator if this node has a valid type. If this node @@ -35,6 +52,7 @@ public interface EvaluatorMapper { * If {@linkplain Expression#typeResolved} returns an error then * this method may throw. Or return an evaluator that produces * garbage. Or return an evaluator that throws when run. + *

*/ ExpressionEvaluator.Factory toEvaluator(Function toEvaluator); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java deleted file mode 100644 index 85b30032c1070..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; - -import org.elasticsearch.common.TriFunction; -import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; -import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; -import org.elasticsearch.xpack.esql.planner.Layout; -import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; - -import static org.elasticsearch.xpack.esql.evaluator.EvalMapper.toEvaluator; - -public abstract class ComparisonMapper extends ExpressionMapper { - public static final ExpressionMapper EQUALS = new ComparisonMapper( - 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new EqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper NOT_EQUALS = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsKeywordsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEqualsBoolsEvaluator.Factory::new, - (s, l, r, t) -> new NotEqualsGeometriesEvaluator.Factory(s, l, r) - ) { - }; - - public static final ExpressionMapper GREATER_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper GREATER_THAN_OR_EQUAL = new ComparisonMapper( - 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanKeywordsEvaluator.Factory::new - ) { - }; - - public static final ExpressionMapper LESS_THAN_OR_EQUAL = new ComparisonMapper( - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualIntsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualLongsEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualDoublesEvaluator.Factory::new, - org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqualKeywordsEvaluator.Factory::new - ) { - }; - - private final TriFunction ints; - private final TriFunction longs; - private final TriFunction doubles; - private final TriFunction keywords; - private final TriFunction bools; - private final EvaluatorFunctionWithType geometries; - - @FunctionalInterface - private interface EvaluatorFunctionWithType { - ExpressionEvaluator.Factory apply(Source s, ExpressionEvaluator.Factory t, ExpressionEvaluator.Factory u, T dataType); - } - - 
private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools, - EvaluatorFunctionWithType geometries - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = geometries; - } - - private ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords, - TriFunction bools - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = bools; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - ComparisonMapper( - TriFunction ints, - TriFunction longs, - TriFunction doubles, - TriFunction keywords - ) { - this.ints = ints; - this.longs = longs; - this.doubles = doubles; - this.keywords = keywords; - this.bools = (source, lhs, rhs) -> { throw EsqlIllegalArgumentException.illegalDataType(DataTypes.BOOLEAN); }; - this.geometries = (source, lhs, rhs, dataType) -> { throw EsqlIllegalArgumentException.illegalDataType(dataType); }; - } - - @Override - public final ExpressionEvaluator.Factory map(BinaryComparison bc, Layout layout) { - DataType leftType = bc.left().dataType(); - if (leftType.isNumeric()) { - DataType type = EsqlDataTypeRegistry.INSTANCE.commonType(leftType, bc.right().dataType()); - if (type == DataTypes.INTEGER) { - return castToEvaluator(bc, layout, DataTypes.INTEGER, ints); - } - if (type == DataTypes.LONG) { - return castToEvaluator(bc, layout, DataTypes.LONG, longs); - } - if (type == DataTypes.DOUBLE) { - return castToEvaluator(bc, layout, DataTypes.DOUBLE, doubles); - } - if (type == DataTypes.UNSIGNED_LONG) { - // using the long comparators will work on UL as well - return castToEvaluator(bc, layout, DataTypes.UNSIGNED_LONG, longs); - } - } - var leftEval = toEvaluator(bc.left(), layout); - var rightEval = toEvaluator(bc.right(), 
layout); - if (leftType == DataTypes.KEYWORD || leftType == DataTypes.TEXT || leftType == DataTypes.IP || leftType == DataTypes.VERSION) { - return keywords.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.BOOLEAN) { - return bools.apply(bc.source(), leftEval, rightEval); - } - if (leftType == DataTypes.DATETIME) { - return longs.apply(bc.source(), leftEval, rightEval); - } - if (EsqlDataTypes.isSpatial(leftType)) { - return geometries.apply(bc.source(), leftEval, rightEval, leftType); - } - throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); - } - - public static ExpressionEvaluator.Factory castToEvaluator( - BinaryOperator op, - Layout layout, - DataType required, - TriFunction factory - ) { - var lhs = Cast.cast(op.source(), op.left().dataType(), required, toEvaluator(op.left(), layout)); - var rhs = Cast.cast(op.source(), op.right().dataType(), required, toEvaluator(op.right(), layout)); - return factory.apply(op.source(), lhs, rhs); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 7b4e867adad91..cea88d3598c2f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -16,6 +16,7 @@ import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import 
org.elasticsearch.xpack.esql.planner.Layout; @@ -24,8 +25,6 @@ import java.util.BitSet; import java.util.List; -import static org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.ComparisonMapper.EQUALS; - public class InMapper extends ExpressionMapper { public static final InMapper IN_MAPPER = new InMapper(); @@ -38,7 +37,7 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { List listEvaluators = new ArrayList<>(in.list().size()); in.list().forEach(e -> { Equals eq = new Equals(in.source(), in.value(), e); - ExpressionEvaluator.Factory eqEvaluator = ((ExpressionMapper) EQUALS).map(eq, layout); + ExpressionEvaluator.Factory eqEvaluator = EvalMapper.toEvaluator(eq, layout); listEvaluators.add(eqEvaluator); }); return dvrCtx -> new InExpressionEvaluator(dvrCtx, listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); From 7eae95620b41c8c42a647b059b096703b4d510f4 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Mon, 29 Apr 2024 11:15:43 -0400 Subject: [PATCH 028/244] [ci] Move multi-node tests from check part2 to part5 (#107553) --- .buildkite/pipelines/intake.template.yml | 8 ++++++++ .buildkite/pipelines/intake.yml | 8 ++++++++ .buildkite/pipelines/lucene-snapshot/run-tests.yml | 8 ++++++++ .buildkite/pipelines/periodic-platform-support.yml | 2 ++ .buildkite/pipelines/periodic.template.yml | 2 ++ .buildkite/pipelines/periodic.yml | 2 ++ .buildkite/pipelines/pull-request/part-5-arm.yml | 13 +++++++++++++ .buildkite/pipelines/pull-request/part-5-fips.yml | 11 +++++++++++ .../pipelines/pull-request/part-5-windows.yml | 14 ++++++++++++++ .buildkite/pipelines/pull-request/part-5.yml | 11 +++++++++++ build.gradle | 2 ++ 11 files changed, 81 insertions(+) create mode 100644 .buildkite/pipelines/pull-request/part-5-arm.yml create mode 100644 .buildkite/pipelines/pull-request/part-5-fips.yml create mode 100644 .buildkite/pipelines/pull-request/part-5-windows.yml create mode 100644 .buildkite/pipelines/pull-request/part-5.yml diff --git 
a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml index 66b989d94455c..f530f237113a9 100644 --- a/.buildkite/pipelines/intake.template.yml +++ b/.buildkite/pipelines/intake.template.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 8103b40cbaff0..b1f05ea23da4c 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -41,6 +41,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/lucene-snapshot/run-tests.yml b/.buildkite/pipelines/lucene-snapshot/run-tests.yml index a5d3c4e5f7935..c76c54a56494e 100644 --- a/.buildkite/pipelines/lucene-snapshot/run-tests.yml +++ b/.buildkite/pipelines/lucene-snapshot/run-tests.yml @@ -40,6 +40,14 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: custom-32-98304 buildDirectory: /dev/shm/bk + - label: part5 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true 
-Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: bwc-snapshots steps: - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml index f454d20fc542e..d8c5d55fc7e4f 100644 --- a/.buildkite/pipelines/periodic-platform-support.yml +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -48,6 +48,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -72,6 +73,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: aws diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml index b102208dd7cce..fda4315926b6b 100644 --- a/.buildkite/pipelines/periodic.template.yml +++ b/.buildkite/pipelines/periodic.template.yml @@ -50,6 +50,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -92,6 +93,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3748f4941420e..fa37d37d9de9a 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -391,6 +391,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp @@ -433,6 +434,7 @@ steps: - checkPart2 - checkPart3 - checkPart4 + - checkPart5 - checkRestCompat agents: provider: gcp diff --git a/.buildkite/pipelines/pull-request/part-5-arm.yml b/.buildkite/pipelines/pull-request/part-5-arm.yml new file mode 100644 index 0000000000000..7bc3a6157155b --- /dev/null +++ 
b/.buildkite/pipelines/pull-request/part-5-arm.yml @@ -0,0 +1,13 @@ +config: + allow-labels: "test-arm" +steps: + - label: part-5-arm + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart5 + timeout_in_minutes: 300 + agents: + provider: aws + imagePrefix: elasticsearch-ubuntu-2004-aarch64 + instanceType: m6g.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 diff --git a/.buildkite/pipelines/pull-request/part-5-fips.yml b/.buildkite/pipelines/pull-request/part-5-fips.yml new file mode 100644 index 0000000000000..4e193ac751086 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-fips.yml @@ -0,0 +1,11 @@ +config: + allow-labels: "Team:Security" +steps: + - label: part-5-fips + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed -Dtests.fips.enabled=true checkPart5 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/pull-request/part-5-windows.yml b/.buildkite/pipelines/pull-request/part-5-windows.yml new file mode 100644 index 0000000000000..4e16a8ef73238 --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5-windows.yml @@ -0,0 +1,14 @@ +config: + allow-labels: "test-windows" +steps: + - label: part-5-windows + command: .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-windows-2022 + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: checkPart5 diff --git a/.buildkite/pipelines/pull-request/part-5.yml b/.buildkite/pipelines/pull-request/part-5.yml new file mode 100644 index 0000000000000..306ce7533d0ed --- /dev/null +++ b/.buildkite/pipelines/pull-request/part-5.yml @@ -0,0 +1,11 @@ +config: + skip-target-branches: "7.17" +steps: + - label: part-5 + command: .ci/scripts/run-gradle.sh -Dignore.tests.seed checkPart5 + timeout_in_minutes: 300 
+ agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/build.gradle b/build.gradle index 16c6fce28fe4b..1d9757f32543d 100644 --- a/build.gradle +++ b/build.gradle @@ -287,6 +287,8 @@ allprojects { tasks.register('checkPart4') { dependsOn 'check' } } else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) { tasks.register('checkPart3') { dependsOn 'check' } + } else if (project.path.contains("multi-node")) { + tasks.register('checkPart5') { dependsOn 'check' } } else { tasks.register('checkPart2') { dependsOn 'check' } } From d6357452dc8b7a663c1eb1371a16949bcc7875b8 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 29 Apr 2024 11:35:24 -0400 Subject: [PATCH 029/244] ESQL: Add key names to description of hash lookup (#108012) When I was debugging further work on our new hash stuff I found myself really wanting the key names on the `description` and `toString` of the hash lookup operator. This adds it. 
--- .../compute/operator/HashLookupOperator.java | 52 +++++++++---------- .../elasticsearch/compute/OperatorTests.java | 8 ++- .../operator/HashLookupOperatorTests.java | 11 ++-- 3 files changed, 40 insertions(+), 31 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java index 2b77003f11a4f..f821f2a37d1cf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/HashLookupOperator.java @@ -23,19 +23,26 @@ import java.util.List; public class HashLookupOperator extends AbstractPageMappingToIteratorOperator { + public record Key(String name, Block block) { + @Override + public String toString() { + return "{name=" + + name + + ", type=" + + block.elementType() + + ", positions=" + + block.getPositionCount() + + ", size=" + + ByteSizeValue.ofBytes(block.ramBytesUsed()) + + "}"; + } + } + /** * Factory for {@link HashLookupOperator}. It's received {@link Block}s * are never closed, so we need to build them from a non-tracking factory. 
*/ - public static class Factory implements Operator.OperatorFactory { - private final Block[] keys; - private final int[] blockMapping; - - public Factory(Block[] keys, int[] blockMapping) { - this.keys = keys; - this.blockMapping = blockMapping; - } - + public record Factory(Key[] keys, int[] blockMapping) implements Operator.OperatorFactory { @Override public Operator get(DriverContext driverContext) { return new HashLookupOperator(driverContext.blockFactory(), keys, blockMapping); @@ -43,30 +50,23 @@ public Operator get(DriverContext driverContext) { @Override public String describe() { - StringBuilder b = new StringBuilder(); - b.append("HashLookup[keys=["); - for (int k = 0; k < keys.length; k++) { - Block key = keys[k]; - if (k != 0) { - b.append(", "); - } - b.append("{type=").append(key.elementType()); - b.append(", positions=").append(key.getPositionCount()); - b.append(", size=").append(ByteSizeValue.ofBytes(key.ramBytesUsed())).append("}"); - } - b.append("], mapping=").append(Arrays.toString(blockMapping)).append("]"); - return b.toString(); + return "HashLookup[keys=" + Arrays.toString(keys) + ", mapping=" + Arrays.toString(blockMapping) + "]"; } } + private final List keys; private final BlockHash hash; private final int[] blockMapping; - public HashLookupOperator(BlockFactory blockFactory, Block[] keys, int[] blockMapping) { + public HashLookupOperator(BlockFactory blockFactory, Key[] keys, int[] blockMapping) { this.blockMapping = blockMapping; + this.keys = new ArrayList<>(keys.length); + Block[] blocks = new Block[keys.length]; List groups = new ArrayList<>(keys.length); for (int k = 0; k < keys.length; k++) { - groups.add(new BlockHash.GroupSpec(k, keys[k].elementType())); + this.keys.add(keys[k].name); + blocks[k] = keys[k].block; + groups.add(new BlockHash.GroupSpec(k, keys[k].block.elementType())); } /* * Force PackedValuesBlockHash because it assigned ordinals in order @@ -83,7 +83,7 @@ public HashLookupOperator(BlockFactory blockFactory, 
Block[] keys, int[] blockMa boolean success = false; try { final int[] lastOrd = new int[] { -1 }; - hash.add(new Page(keys), new GroupingAggregatorFunction.AddInput() { + hash.add(new Page(blocks), new GroupingAggregatorFunction.AddInput() { @Override public void add(int positionOffset, IntBlock groupIds) { // TODO support multiple rows with the same keys @@ -128,7 +128,7 @@ protected ReleasableIterator receive(Page page) { @Override public String toString() { - return "HashLookup[hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; + return "HashLookup[keys=" + keys + ", hash=" + hash + ", mapping=" + Arrays.toString(blockMapping) + "]"; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 805f26e9ef280..64afb14d22326 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -370,7 +370,13 @@ public void testHashLookup() { var driver = new Driver( driverContext, new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100), - List.of(new HashLookupOperator(driverContext.blockFactory(), new Block[] { primesBlock }, new int[] { 0 })), + List.of( + new HashLookupOperator( + driverContext.blockFactory(), + new HashLookupOperator.Key[] { new HashLookupOperator.Key("primes", primesBlock) }, + new int[] { 0 } + ) + ), new PageConsumerOperator(page -> { try { BlockTestUtils.readInto(actualValues, page.getBlock(0)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index ec69297718237..e2af5e82f0d46 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; @@ -31,18 +30,22 @@ protected void assertSimpleOutput(List input, List results) { @Override protected Operator.OperatorFactory simple() { return new HashLookupOperator.Factory( - new Block[] { TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() }, + new HashLookupOperator.Key[] { + new HashLookupOperator.Key( + "foo", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() + ) }, new int[] { 0 } ); } @Override protected String expectedDescriptionOfSimple() { - return "HashLookup[keys=[{type=LONG, positions=3, size=96b}], mapping=[0]]"; + return "HashLookup[keys=[{name=foo, type=LONG, positions=3, size=96b}], mapping=[0]]"; } @Override protected String expectedToStringOfSimple() { - return "HashLookup[hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; + return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; } } From 3e16d0ae559b9bc370fa45da5748bad69f125609 Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Mon, 29 Apr 2024 11:46:13 -0400 Subject: [PATCH 030/244] [Transform] Remove remote cluster from local test (#108014) This test can fail when we randomly add remote clusters. We would need to fix our assertion logic, but `testDisablePitWhenThereIsRemoteIndexInSource` already covers the test case when there is a remote cluster and pit disabled. 
--- .../transform/transforms/ClientTransformIndexerTests.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 04263277d6615..062c951f67c96 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -308,11 +308,6 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { public void testDisablePit() throws InterruptedException { TransformConfig.Builder configBuilder = new TransformConfig.Builder(TransformConfigTests.randomTransformConfig()); - if (randomBoolean()) { - // TransformConfigTests.randomTransformConfig never produces remote indices in the source. - // We need to explicitly set the remote index here for coverage. - configBuilder.setSource(new SourceConfig("remote-cluster:remote-index")); - } TransformConfig config = configBuilder.build(); boolean pitEnabled = TransformEffectiveSettings.isPitDisabled(config.getSettings()) == false; From 67630917a58c4f5771a47b58338df0afbec547d0 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 29 Apr 2024 12:35:23 -0400 Subject: [PATCH 031/244] ESQL: Add ColumnLookupOperator (#108020) This adds an operator to look up values from fixed columns which will be useful in making inline LOOKUP operations.
--- .../compute/operator/ColumnLoadOperator.java | 74 ++++++++++++++++ .../operator/ColumnLoadOperatorTests.java | 84 +++++++++++++++++++ .../operator/HashLookupOperatorTests.java | 48 ++++++++++- .../compute/operator/OperatorTestCase.java | 6 +- 4 files changed, 206 insertions(+), 6 deletions(-) create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java new file mode 100644 index 0000000000000..4e06c1f0f4b69 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ColumnLoadOperator.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.ReleasableIterator; + +/** + * {@link Block#lookup Looks up} values from a provided {@link Block} and + * merges them into each {@link Page}. + */ +public class ColumnLoadOperator extends AbstractPageMappingToIteratorOperator { + public record Values(String name, Block block) { + @Override + public String toString() { + return name + ":" + block.elementType(); + } + } + + /** + * Factory for {@link ColumnLoadOperator}. Its received {@link Block}s + * are never closed, so we need to build them from a non-tracking factory.
+ */ + public record Factory(Values values, int positionsOrd) implements OperatorFactory { + @Override + public Operator get(DriverContext driverContext) { + return new ColumnLoadOperator(values, positionsOrd); + } + + @Override + public String describe() { + return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]"; + } + } + + private final Values values; + private final int positionsOrd; + + public ColumnLoadOperator(Values values, int positionsOrd) { + this.values = values; + this.positionsOrd = positionsOrd; + } + + /** + * The target size of each loaded block. + * TODO target the size more intelligently + */ + static final ByteSizeValue TARGET_BLOCK_SIZE = ByteSizeValue.ofKb(10); + + @Override + protected ReleasableIterator receive(Page page) { + // TODO tracking is complex for values + /* + * values is likely shared across many threads so tracking it is complex. + * Lookup will incRef it on the way in and decrement the ref on the way + * out but it's not really clear what the right way to get all that thread + * safe is. For now we can ignore this because we're not actually tracking + * the memory of the block. + */ + return appendBlocks(page, values.block.lookup(page.getBlock(positionsOrd), TARGET_BLOCK_SIZE)); + } + + @Override + public String toString() { + return "ColumnLoad[values=" + values + ", positions=" + positionsOrd + "]"; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java new file mode 100644 index 0000000000000..c606e4fd4c736 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnLoadOperatorTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; + +import java.util.List; +import java.util.stream.IntStream; + +import static org.hamcrest.Matchers.equalTo; + +public class ColumnLoadOperatorTests extends OperatorTestCase { + @Override + protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { + return new SequenceIntBlockSourceOperator(blockFactory, IntStream.range(0, size).map(l -> between(0, 4))); + } + + @Override + protected void assertSimpleOutput(List input, List results) { + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int loadedIdx = 0; + IntBlock keys = null; + int keysOffset = 0; + LongBlock loaded = null; + int loadedOffset = 0; + int p = 0; + while (p < count) { + if (keys == null) { + keys = input.get(keysIdx++).getBlock(0); + } + if (loaded == null) { + loaded = results.get(loadedIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(loaded.getValueCount(p - loadedOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int loadedStart = loaded.getFirstValueIndex(p - loadedOffset); + for (int k = keysStart, l = loadedStart; k < keysStart + valueCount; k++, l++) { + assertThat(loaded.getLong(l), equalTo(3L * keys.getInt(k))); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - loadedOffset == loaded.getPositionCount()) { + loadedOffset += loaded.getPositionCount(); + loaded = 
null; + } + } + } + + @Override + protected Operator.OperatorFactory simple() { + return new ColumnLoadOperator.Factory( + new ColumnLoadOperator.Values( + "values", + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 0, 3, 6, 9, 12 }, 5).asBlock() + ), + 0 + ); + } + + @Override + protected String expectedDescriptionOfSimple() { + return "ColumnLoad[values=values:LONG, positions=0]"; + } + + @Override + protected String expectedToStringOfSimple() { + return expectedDescriptionOfSimple(); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index e2af5e82f0d46..711800197aa03 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.compute.operator; import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; @@ -24,7 +26,45 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @Override protected void assertSimpleOutput(List input, List results) { - assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(input.stream().mapToInt(Page::getPositionCount).sum())); + int count = input.stream().mapToInt(Page::getPositionCount).sum(); + assertThat(results.stream().mapToInt(Page::getPositionCount).sum(), equalTo(count)); + int keysIdx = 0; + int ordsIdx = 0; + LongBlock keys = null; + int keysOffset = 0; + IntBlock ords = null; + int ordsOffset = 0; + int p = 0; + while (p < count) { + if (keys == null) { + keys = 
input.get(keysIdx++).getBlock(0); + } + if (ords == null) { + ords = results.get(ordsIdx++).getBlock(1); + } + int valueCount = keys.getValueCount(p - keysOffset); + assertThat(ords.getValueCount(p - ordsOffset), equalTo(valueCount)); + int keysStart = keys.getFirstValueIndex(p - keysOffset); + int ordsStart = ords.getFirstValueIndex(p - ordsOffset); + for (int k = keysStart, l = ordsStart; k < keysStart + valueCount; k++, l++) { + assertThat(ords.getInt(l), equalTo(switch ((int) keys.getLong(k)) { + case 1 -> 0; + case 7 -> 1; + case 14 -> 2; + case 20 -> 3; + default -> null; + })); + } + p++; + if (p - keysOffset == keys.getPositionCount()) { + keysOffset += keys.getPositionCount(); + keys = null; + } + if (p - ordsOffset == ords.getPositionCount()) { + ordsOffset += ords.getPositionCount(); + ords = null; + } + } } @Override @@ -33,7 +73,7 @@ protected Operator.OperatorFactory simple() { new HashLookupOperator.Key[] { new HashLookupOperator.Key( "foo", - TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 7, 14, 20 }, 3).asBlock() + TestBlockFactory.getNonBreakingInstance().newLongArrayVector(new long[] { 1, 7, 14, 20 }, 4).asBlock() ) }, new int[] { 0 } ); @@ -41,11 +81,11 @@ protected Operator.OperatorFactory simple() { @Override protected String expectedDescriptionOfSimple() { - return "HashLookup[keys=[{name=foo, type=LONG, positions=3, size=96b}], mapping=[0]]"; + return "HashLookup[keys=[{name=foo, type=LONG, positions=4, size=104b}], mapping=[0]]"; } @Override protected String expectedToStringOfSimple() { - return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=3, size=536b}, mapping=[0]]"; + return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=4, size=544b}, mapping=[0]]"; } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index eebcbc091d3ea..be792a0ef2612 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; @@ -112,7 +113,7 @@ public final void testSimpleCircuitBreaking() { private void runWithLimit(Operator.OperatorFactory factory, List input, ByteSizeValue limit) { BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); + MockBlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); DriverContext driverContext = new DriverContext(bigArrays, blockFactory); List localInput = CannedSourceOperator.deepCopyOf(blockFactory, input); boolean driverStarted = false; @@ -125,7 +126,8 @@ private void runWithLimit(Operator.OperatorFactory factory, List input, By // if drive hasn't even started then we need to release the input pages manually Releasables.closeExpectNoException(Releasables.wrap(() -> Iterators.map(localInput.iterator(), p -> p::releaseBlocks))); } - assertThat(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed(), equalTo(0L)); + blockFactory.ensureAllBlocksAreReleased(); + assertThat(breaker.getUsed(), equalTo(0L)); } } From b6ccb842cabd0d5b518c6bb6ab1997f1ab1f3152 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: 
Mon, 29 Apr 2024 12:25:37 -0700 Subject: [PATCH 032/244] Widen numeric counter types (#108036) We should widen the numeric root types before converting them into counter types. Relates #107877 --- .../src/main/resources/tsdb-mapping.json | 4 ++++ .../xpack/esql/type/EsqlDataTypes.java | 7 ++++++- .../xpack/esql/analysis/AnalyzerTests.java | 20 +++++++++++++++++++ 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json index c3bba9724602b..dd4073d5dc7cf 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/tsdb-mapping.json @@ -23,6 +23,10 @@ "bytes_out": { "type": "long", "time_series_metric": "counter" + }, + "message_in": { + "type": "float", + "time_series_metric": "counter" } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index 912c17dae0865..44f6844544698 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -249,7 +249,12 @@ public static DataType widenSmallNumericTypes(DataType type) { } public static DataType getCounterType(String typeName) { - return fromTypeName("counter_" + typeName); + final DataType rootType = widenSmallNumericTypes(fromName(typeName)); + if (rootType == UNSUPPORTED) { + return rootType; + } + assert rootType == LONG || rootType == INTEGER || rootType == DOUBLE : rootType; + return fromTypeName("counter_" + rootType.typeName()); } public static boolean isCounterType(DataType dt) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 1f32a5a76f3e8..3757720cc203a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlIndexResolver; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; @@ -59,6 +60,8 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; @@ -69,6 +72,7 @@ import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyze; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.analyzer; import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.loadMapping; +import static org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils.tsdbIndexResolution; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -1626,6 +1630,22 @@ public void testChainedEvalFieldsUse() { assertProjection(query + " | keep x*", IntStream.range(0, additionalEvals + 3).mapToObj(v -> "x" + v).toArray(String[]::new)); } + public void testCounterTypes() { + var query = "FROM test | KEEP network.* | LIMIT 10"; + Analyzer analyzer = analyzer(tsdbIndexResolution()); + LogicalPlan plan = analyze(query, analyzer); + var limit = as(plan, Limit.class); + var attributes = 
limit.output().stream().collect(Collectors.toMap(NamedExpression::name, a -> a)); + assertThat( + attributes.keySet(), + equalTo(Set.of("network.connections", "network.bytes_in", "network.bytes_out", "network.message_in")) + ); + assertThat(attributes.get("network.connections").dataType(), equalTo(DataTypes.LONG)); + assertThat(attributes.get("network.bytes_in").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.bytes_out").dataType(), equalTo(EsqlDataTypes.COUNTER_LONG)); + assertThat(attributes.get("network.message_in").dataType(), equalTo(EsqlDataTypes.COUNTER_DOUBLE)); + } + public void testMissingAttributeException_InChainedEval() { var e = expectThrows(VerificationException.class, () -> analyze(""" from test From 32deb7fa465913aac8fdbb6b6eecc848706f17fd Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 13:25:47 -0600 Subject: [PATCH 033/244] Decouple API key version from node version (#104156) This commit decouples the version stored in API key documents from the node version, as part of the broader effort to stop relying on node version for product logic. The actual data format does not change - the version is stored as an integer before and after this commit, it's just determined by a manually set `ApiKey.Version` class rather than being derived from the node version. 
--- .../ComponentVersionsNodesInfoIT.java | 2 +- .../core/src/main/java/module-info.java | 3 ++- .../core/security/action/apikey/ApiKey.java | 24 +++++++++++++++++++ ...n.cluster.node.info.ComponentVersionNumber | 1 + .../xpack/security/authc/ApiKeyService.java | 20 +++++++--------- .../security/authc/ApiKeyServiceTests.java | 17 +++++++------ 6 files changed, 47 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java index 32024ff03ed15..1202f828059f6 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/nodesinfo/ComponentVersionsNodesInfoIT.java @@ -29,7 +29,7 @@ public void testNodesInfoComponentVersions() { assertThat(response.getNodesMap().get(server1NodeId), notNullValue()); assertThat( response.getNodesMap().get(server1NodeId).getComponentVersions().keySet(), - containsInAnyOrder("transform_config_version", "ml_config_version") + containsInAnyOrder("transform_config_version", "ml_config_version", "api_key_version") ); } } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index 77def0fd12459..070df2efc2629 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -232,7 +232,8 @@ provides org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber with org.elasticsearch.xpack.core.ml.MlConfigVersionComponent, - org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent; + org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent, + org.elasticsearch.xpack.core.security.action.apikey.ApiKey.VersionComponent; provides 
org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.core.XPackFeatures; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java index 57cf816a46072..cee63c16229e0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.action.apikey; +import org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.Nullable; @@ -81,6 +83,28 @@ public String value() { } } + public record Version(int version) implements VersionId { + @Override + public int id() { + return version; + } + } + + public static class VersionComponent implements ComponentVersionNumber { + + @Override + public String componentId() { + return "api_key_version"; + } + + @Override + public VersionId versionNumber() { + return CURRENT_API_KEY_VERSION; + } + } + + public static final ApiKey.Version CURRENT_API_KEY_VERSION = new ApiKey.Version(8_13_00_99); + private final String name; private final String id; private final Type type; diff --git a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber index 078217faee53a..568483f03f756 100644 --- a/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber +++ 
b/x-pack/plugin/core/src/main/resources/META-INF/services/org.elasticsearch.action.admin.cluster.node.info.ComponentVersionNumber @@ -1,2 +1,3 @@ org.elasticsearch.xpack.core.ml.MlConfigVersionComponent org.elasticsearch.xpack.core.transform.TransformConfigVersionComponent +org.elasticsearch.xpack.core.security.action.apikey.ApiKey$VersionComponent diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java index 1a5b1ab39cd83..fa9b53c5af935 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java @@ -14,7 +14,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.DocWriteRequest; @@ -415,7 +414,6 @@ private void createApiKeyAndIndexIt( final Instant expiration = getApiKeyExpiration(created, request.getExpiration()); final SecureString apiKey = UUIDs.randomBase64UUIDSecureString(); assert ApiKey.Type.CROSS_CLUSTER != request.getType() || API_KEY_SECRET_LENGTH == apiKey.length(); - final Version version = clusterService.state().nodes().getMinNodeVersion(); computeHashForApiKey(apiKey, listener.delegateFailure((l, apiKeyHashChars) -> { try ( @@ -428,7 +426,7 @@ private void createApiKeyAndIndexIt( expiration, request.getRoleDescriptors(), request.getType(), - version, + ApiKey.CURRENT_API_KEY_VERSION, request.getMetadata() ) ) { @@ -712,7 +710,7 @@ static XContentBuilder newDocument( Instant expiration, List keyRoleDescriptors, ApiKey.Type type, - Version version, + ApiKey.Version version, @Nullable Map metadata ) throws 
IOException { final XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -727,7 +725,7 @@ static XContentBuilder newDocument( addRoleDescriptors(builder, keyRoleDescriptors); addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", name).field("version", version.id).field("metadata_flattened", metadata); + builder.field("name", name).field("version", version.version()).field("metadata_flattened", metadata); addCreator(builder, authentication); return builder.endObject(); @@ -742,7 +740,7 @@ static XContentBuilder newDocument( static XContentBuilder maybeBuildUpdatedDocument( final String apiKeyId, final ApiKeyDoc currentApiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors, @@ -779,7 +777,7 @@ static XContentBuilder maybeBuildUpdatedDocument( addLimitedByRoleDescriptors(builder, userRoleDescriptors); - builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.id); + builder.field("name", currentApiKeyDoc.name).field("version", targetDocVersion.version()); assert currentApiKeyDoc.metadataFlattened == null || MetadataUtils.containsReservedMetadata( @@ -807,12 +805,12 @@ static XContentBuilder maybeBuildUpdatedDocument( private static boolean isNoop( final String apiKeyId, final ApiKeyDoc apiKeyDoc, - final Version targetDocVersion, + final ApiKey.Version targetDocVersion, final Authentication authentication, final BaseUpdateApiKeyRequest request, final Set userRoleDescriptors ) throws IOException { - if (apiKeyDoc.version != targetDocVersion.id) { + if (apiKeyDoc.version != targetDocVersion.version()) { return false; } @@ -1468,8 +1466,8 @@ private IndexRequest maybeBuildIndexRequest( currentVersionedDoc.primaryTerm() ); } - final var targetDocVersion = clusterService.state().nodes().getMinNodeVersion(); - final var currentDocVersion = 
Version.fromId(currentVersionedDoc.doc().version); + final var targetDocVersion = ApiKey.CURRENT_API_KEY_VERSION; + final var currentDocVersion = new ApiKey.Version(currentVersionedDoc.doc().version); assert currentDocVersion.onOrBefore(targetDocVersion) : "current API key doc version must be on or before target version"; if (logger.isDebugEnabled() && currentDocVersion.before(targetDocVersion)) { logger.debug( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 0cb7a270099ad..b3ec3ef117c3e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -67,7 +67,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; @@ -1161,7 +1160,7 @@ private static Tuple, Map> newApiKeyDocument Instant.now().plus(expiry), keyRoles, type, - Version.CURRENT, + ApiKey.CURRENT_API_KEY_VERSION, metadataMap ); Map keyMap = XContentHelper.convertToMap(BytesReference.bytes(docSource), true, XContentType.JSON).v2(); @@ -2368,7 +2367,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException { final long now = randomMillisUpToYear9999(); when(clock.instant()).thenReturn(Instant.ofEpochMilli(now)); final Map oldMetadata = ApiKeyTests.randomMetadata(); - final Version oldVersion = VersionUtils.randomVersion(random()); + final ApiKey.Version oldVersion = new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version())); final ApiKeyDoc oldApiKeyDoc = 
ApiKeyDoc.fromXContent( XContentHelper.createParser( XContentParserConfiguration.EMPTY, @@ -2419,8 +2418,8 @@ public void testMaybeBuildUpdatedDocument() throws IOException { final Map newMetadata = changeMetadata ? randomValueOtherThanMany(md -> md == null || md.equals(oldMetadata), ApiKeyTests::randomMetadata) : (randomBoolean() ? oldMetadata : null); - final Version newVersion = changeVersion - ? randomValueOtherThan(oldVersion, () -> VersionUtils.randomVersion(random())) + final ApiKey.Version newVersion = changeVersion + ? randomValueOtherThan(oldVersion, ApiKeyServiceTests::randomApiKeyVersion) : oldVersion; final Authentication newAuthentication = changeCreator ? randomValueOtherThanMany( @@ -2468,7 +2467,7 @@ public void testMaybeBuildUpdatedDocument() throws IOException { assertEquals(oldApiKeyDoc.hash, updatedApiKeyDoc.hash); assertEquals(oldApiKeyDoc.creationTime, updatedApiKeyDoc.creationTime); assertEquals(oldApiKeyDoc.invalidated, updatedApiKeyDoc.invalidated); - assertEquals(newVersion.id, updatedApiKeyDoc.version); + assertEquals(newVersion.version(), updatedApiKeyDoc.version); final var actualUserRoles = service.parseRoleDescriptorsBytes( "", updatedApiKeyDoc.limitedByRoleDescriptorsBytes, @@ -2991,7 +2990,7 @@ public static Authentication createApiKeyAuthentication( Instant.now().plus(Duration.ofSeconds(3600)), keyRoles, ApiKey.Type.REST, - Version.CURRENT, + ApiKey.CURRENT_API_KEY_VERSION, randomBoolean() ? 
null : Map.of(randomAlphaOfLengthBetween(3, 8), randomAlphaOfLengthBetween(3, 8)) ); final ApiKeyDoc apiKeyDoc = ApiKeyDoc.fromXContent( @@ -3207,4 +3206,8 @@ private static Authenticator.Context getAuthenticatorContext(ThreadContext threa mock(Realms.class) ); } + + private static ApiKey.Version randomApiKeyVersion() { + return new ApiKey.Version(randomIntBetween(1, ApiKey.CURRENT_API_KEY_VERSION.version())); + } } From a451511e3a127e0c9f2b8c91d67a7d2c4e66c44f Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Mon, 29 Apr 2024 15:53:47 -0400 Subject: [PATCH 034/244] Change skip_unavailable default value to true (#105792) In order to improve the experience of cross-cluster search, we are changing the default value of the remote cluster `skip_unavailable` setting from `false` to `true`. This setting causes any cross-cluster _search (or _async_search) to entirely fail when any remote cluster with `skip_unavailable=false` is either unavailable (connection to it fails) or if the search on it fails on all shards. Setting `skip_unavailable=true` allows partial results from other clusters to be returned. In that case, the search response cluster metadata will show a `skipped` status, so the user can see that no data came in from that cluster. Kibana also now leverages this metadata in the cross-cluster search responses to allow users to see how many clusters returned data and drill down into which clusters did not (including failure messages). Currently, the user/admin has to specifically set the value to `true` in the configs, like so: ``` cluster: remote: remote1: seeds: 10.10.10.10:9300 skip_unavailable: true ``` even though that is probably what search admins want in the vast majority of cases. 
Setting `skip_unavailable=false` should be a conscious (and probably rare) choice by an Elasticsearch admin that a particular cluster's results are so essential to a search (or visualization in dashboard or Discover panel) that no results at all should be shown if it cannot return any results. --- docs/changelog/105792.yaml | 18 ++++ docs/reference/ccr/getting-started.asciidoc | 2 +- .../cluster/remote-clusters-connect.asciidoc | 18 ++-- .../cluster/remote-clusters-settings.asciidoc | 17 +++- .../search-across-clusters.asciidoc | 54 ++++++---- .../index/reindex/CrossClusterReindexIT.java | 6 ++ .../rest/yaml/CcsCommonYamlTestSuiteIT.java | 1 + .../yaml/RcsCcsCommonYamlTestSuiteIT.java | 1 + qa/multi-cluster-search/build.gradle | 1 + .../test/multi_cluster/10_basic.yml | 2 +- .../test/multi_cluster/20_info.yml | 16 +-- .../transport/RemoteClusterService.java | 2 +- .../search/TransportSearchActionTests.java | 40 +++++--- .../transport/RemoteClusterClientTests.java | 1 + .../transport/RemoteClusterServiceTests.java | 2 +- .../transport/RemoteClusterSettingsTests.java | 2 +- .../RemoteClusterSecurityApiKeyRestIT.java | 4 +- .../RemoteClusterSecurityEsqlIT.java | 18 +++- ...lusterSecurityFcActionAuthorizationIT.java | 54 +++++++--- ...ecurityLicensingAndFeatureUsageRestIT.java | 2 + .../RemoteClusterSecurityRestIT.java | 99 +++++++++++++------ .../legacy-with-basic-license/build.gradle | 1 + 22 files changed, 254 insertions(+), 107 deletions(-) create mode 100644 docs/changelog/105792.yaml diff --git a/docs/changelog/105792.yaml b/docs/changelog/105792.yaml new file mode 100644 index 0000000000000..2ad5aa970c214 --- /dev/null +++ b/docs/changelog/105792.yaml @@ -0,0 +1,18 @@ +pr: 105792 +summary: "Change `skip_unavailable` remote cluster setting default value to true" +area: Search +type: breaking +issues: [] +breaking: + title: "Change `skip_unavailable` remote cluster setting default value to true" + area: Cluster and node setting + details: The default value of the 
`skip_unavailable` setting is now set to true. + All existing and future remote clusters that do not define this setting will use the new default. + This setting only affects cross-cluster searches using the _search or _async_search API. + impact: Unavailable remote clusters in a cross-cluster search will no longer cause the search to fail unless + skip_unavailable is configured to be `false` in elasticsearch.yml or via the `_cluster/settings` API. + Unavailable clusters with `skip_unavailable`=`true` (either explicitly or by using the new default) are marked + as SKIPPED in the search response metadata section and do not fail the entire search. If users want to ensure that a + search returns a failure when a particular remote cluster is not available, `skip_unavailable` must now be + set explicitly. + notable: false diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index d30cd43a4db5e..a9fe8be93d018 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -147,7 +147,7 @@ cluster with cluster alias `leader`. "num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, + "skip_unavailable" : true, "mode" : "sniff" } } diff --git a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc index 7fb345660e086..5344cb97465d7 100644 --- a/docs/reference/modules/cluster/remote-clusters-connect.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-connect.asciidoc @@ -37,7 +37,7 @@ clusters on individual nodes in the local cluster, define static settings in `elasticsearch.yml` for each node. The following request adds a remote cluster with an alias of `cluster_one`.
This -_cluster alias_ is a unique identifier that represents the connection to the +_cluster alias_ is a unique identifier that represents the connection to the remote cluster and is used to distinguish between local and remote indices. [source,console,subs=attributes+] @@ -60,7 +60,7 @@ PUT /_cluster/settings // TEST[setup:host] // TEST[s/127.0.0.1:\{remote-interface-default-port\}/\${transport_host}/] <1> The cluster alias of this remote cluster is `cluster_one`. -<2> Specifies the hostname and {remote-interface} port of a seed node in the +<2> Specifies the hostname and {remote-interface} port of a seed node in the remote cluster. You can use the <> to verify that @@ -86,7 +86,7 @@ cluster with the cluster alias `cluster_one`: "num_nodes_connected" : 1, <1> "max_connections_per_cluster" : 3, "initial_connect_timeout" : "30s", - "skip_unavailable" : false, <2> + "skip_unavailable" : true, <2> ifeval::["{trust-mechanism}"=="api-key"] "cluster_credentials": "::es_redacted::", <3> endif::[] @@ -103,7 +103,7 @@ connected to. <2> Indicates whether to skip the remote cluster if searched through {ccs} but no nodes are available. ifeval::["{trust-mechanism}"=="api-key"] -<3> If present, indicates the remote cluster has connected using API key +<3> If present, indicates the remote cluster has connected using API key authentication. endif::[] @@ -187,7 +187,7 @@ PUT _cluster/settings You can delete a remote cluster from the cluster settings by passing `null` values for each remote cluster setting. 
The following request removes -`cluster_two` from the cluster settings, leaving `cluster_one` and +`cluster_two` from the cluster settings, leaving `cluster_one` and `cluster_three` intact: [source,console] @@ -212,15 +212,15 @@ PUT _cluster/settings ===== Statically configure remote clusters If you specify settings in `elasticsearch.yml`, only the nodes with -those settings can connect to the remote cluster and serve remote cluster +those settings can connect to the remote cluster and serve remote cluster requests. -NOTE: Remote cluster settings that are specified using the +NOTE: Remote cluster settings that are specified using the <> take precedence over settings that you specify in `elasticsearch.yml` for individual nodes. -In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are -arbitrary cluster aliases representing the connection to each cluster. These +In the following example, `cluster_one`, `cluster_two`, and `cluster_three` are +arbitrary cluster aliases representing the connection to each cluster. These names are subsequently used to distinguish between local and remote indices. [source,yaml,subs=attributes+] diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index bba8c7ffb3491..ec61c4c59fc74 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -28,9 +28,20 @@ mode are described separately. Per cluster boolean setting that allows to skip specific clusters when no nodes belonging to them are available and they are the target of a remote - cluster request. Default is `false`, meaning that all clusters are mandatory - by default, but they can selectively be made optional by setting this setting - to `true`. + cluster request. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. 
Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. Once you upgrade the local ("querying") +cluster search coordinator node (the node you send CCS requests to) to 8.15 or later, +any remote clusters that do not have an explicit setting for `skip_unavailable` will +immediately change over to using the new default of true. This is true regardless of +whether you have upgraded the remote clusters to 8.15, as the `skip_unavailable` +search behavior is entirely determined by the setting on the local cluster where +you configure the remotes. + `cluster.remote..transport.ping_schedule`:: diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 2573722b6d2e7..5f9e92c575793 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -1178,7 +1178,13 @@ gathered from all 3 clusters and the total shard count on each cluster is listed By default, a {ccs} fails if a remote cluster in the request is unavailable or returns an error where the search on all shards failed. Use the `skip_unavailable` cluster setting to mark a specific remote cluster as -optional for {ccs}. +either optional or required for {ccs}. + +IMPORTANT: In Elasticsearch 8.15, the default value for `skip_unavailable` was +changed from `false` to `true`. Before Elasticsearch 8.15, if you want a cluster +to be treated as optional for a {ccs}, then you need to set that configuration. +From Elasticsearch 8.15 forward, you need to set the configuration in order to +make a cluster required for the {ccs}. 
If `skip_unavailable` is `true`, a {ccs}: @@ -1196,25 +1202,33 @@ parameter and the related `search.default_allow_partial_results` cluster setting when searching the remote cluster. This means searches on the remote cluster may return partial results. -The following <> -API request changes `skip_unavailable` setting to `true` for `cluster_two`. - -[source,console] --------------------------------- -PUT _cluster/settings -{ - "persistent": { - "cluster.remote.cluster_two.skip_unavailable": true - } -} --------------------------------- -// TEST[continued] - -If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't -include matching documents from that cluster in the final results. If at -least one shard provides results, those results will be used and the -search will return partial data. (If doing {ccs} using async search, -the `is_partial` field will be set to `true` to indicate partial results.) +You can modify the `skip_unavailable` setting by editing the `cluster.remote.` +settings in the elasticsearch.yml config file. For example: + +``` +cluster: + remote: + cluster_one: + seeds: 35.238.149.1:9300 + skip_unavailable: false + cluster_two: + seeds: 35.238.149.2:9300 + skip_unavailable: true +``` + +Or you can set the cluster.remote settings via the +<> API as shown +<>. + +When a remote cluster configured with `skip_unavailable: true` (such as +`cluster_two` above) is disconnected or unavailable during a {ccs}, {es} won't +include matching documents from that cluster in the final results and the +search will be considered successful (HTTP status 200 OK). + +If at least one shard from a cluster provides search results, those results will +be used and the search will return partial data. This is true regardless of +the `skip_unavailable` setting of the remote cluster. (If doing {ccs} using async +search, the `is_partial` field will be set to `true` to indicate partial results.) 
[discrete] [[ccs-network-delays]] diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java index a4f939fbe3af8..e0396039029c5 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/CrossClusterReindexIT.java @@ -19,6 +19,7 @@ import java.util.Collection; import java.util.List; +import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -38,6 +39,11 @@ protected Collection remoteClusterAlias() { return List.of(REMOTE_CLUSTER); } + @Override + protected Map skipUnavailableForRemoteClusters() { + return Map.of(REMOTE_CLUSTER, false); + } + @Override protected Collection> nodePlugins(String clusterAlias) { return List.of(ReindexPlugin.class); diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index cc613671c860c..a8cff14ff6220 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -101,6 +101,7 @@ public class CcsCommonYamlTestSuiteIT extends ESClientYamlSuiteTestCase { .setting("node.roles", "[data,ingest,master,remote_cluster_client]") .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"") .setting("cluster.remote.connections_per_cluster", "1") + .setting("cluster.remote.remote_cluster.skip_unavailable", "false") .apply(commonClusterConfig) .build(); diff --git 
a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 5a58f3629df14..e3639ffabf664 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -246,6 +246,7 @@ public void initSearchClient() throws IOException { private static void configureRemoteCluster() throws IOException { final Settings.Builder builder = Settings.builder(); + builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".skip_unavailable", "false"); if (randomBoolean()) { builder.put("cluster.remote." + REMOTE_CLUSTER_NAME + ".mode", "proxy") .put("cluster.remote." + REMOTE_CLUSTER_NAME + ".proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 23c46c5804a6e..d0cbc208f4d8e 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -48,6 +48,7 @@ BuildParams.bwcVersions.withWireCompatible(ccsSupportedVersion) { bwcVersion, ba setting 'cluster.remote.connections_per_cluster', '1' setting 'cluster.remote.my_remote_cluster.seeds', { "\"${remoteCluster.get().getAllTransportPortURI().get(0)}\"" } + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' } tasks.register("${baseName}#remote-cluster", RestIntegTestTask) { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 8bbbc7435ff5a..da1245268a0a2 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ 
b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -249,7 +249,7 @@ persistent: cluster.remote.test_remote_cluster.seeds: $remote_ip - - match: {persistent: {cluster.remote.test_remote_cluster.seeds: $remote_ip}} + - match: {persistent.cluster\.remote\.test_remote_cluster\.seeds: $remote_ip} - do: search: diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml index 144990163583b..da4c91869e53d 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yml @@ -113,33 +113,33 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: true + cluster.remote.remote1.skip_unavailable: false - - is_true: persistent.cluster.remote.remote1.skip_unavailable + - is_false: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_true: remote1.skip_unavailable + - is_false: remote1.skip_unavailable - do: cluster.put_settings: body: persistent: - cluster.remote.remote1.skip_unavailable: false + cluster.remote.remote1.skip_unavailable: true - - is_false: persistent.cluster.remote.remote1.skip_unavailable + - is_true: persistent.cluster.remote.remote1.skip_unavailable - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: @@ -152,7 +152,7 @@ - do: cluster.remote_info: {} - - is_false: remote1.skip_unavailable + - is_true: remote1.skip_unavailable - do: cluster.put_settings: diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java 
b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 6060e1fed1397..06fb23ba14749 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -87,7 +87,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public static final Setting.AffixSetting REMOTE_CLUSTER_SKIP_UNAVAILABLE = Setting.affixKeySetting( "cluster.remote.", "skip_unavailable", - (ns, key) -> boolSetting(key, false, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) + (ns, key) -> boolSetting(key, true, new RemoteConnectionEnabled<>(ns, key), Setting.Property.Dynamic, Setting.Property.NodeScope) ); public static final Setting.AffixSetting REMOTE_CLUSTER_PING_SCHEDULE = Setting.affixKeySetting( diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index fea391e8205f5..a35dac8157517 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -469,7 +469,8 @@ private MockTransportService[] startTransport( int numClusters, DiscoveryNode[] nodes, Map remoteIndices, - Settings.Builder settingsBuilder + Settings.Builder settingsBuilder, + boolean skipUnavailable ) { MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; for (int i = 0; i < numClusters; i++) { @@ -486,6 +487,7 @@ private MockTransportService[] startTransport( knownNodes.add(remoteSeedNode); nodes[i] = remoteSeedNode; settingsBuilder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + settingsBuilder.put("cluster.remote.remote" + i + ".skip_unavailable", Boolean.toString(skipUnavailable)); 
remoteIndices.put("remote" + i, new OriginalIndices(new String[] { "index" }, IndicesOptions.lenientExpandOpen())); } return mockTransportServices; @@ -496,7 +498,8 @@ public void testCCSRemoteReduceMergeFails() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -566,7 +569,8 @@ public void testCCSRemoteReduce() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -709,7 +713,8 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + boolean skipUnavailable = randomBoolean(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, skipUnavailable); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -734,10 +739,13 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { final CountDownLatch latch = new CountDownLatch(1); SetOnce>> setOnce = new SetOnce<>(); AtomicReference failure = new AtomicReference<>(); - LatchedActionListener listener = new LatchedActionListener<>( - ActionListener.wrap(r -> fail("no response expected"), failure::set), - latch - ); + LatchedActionListener listener = new LatchedActionListener<>(ActionListener.wrap(r -> { + if (skipUnavailable) { + assertThat(r.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(numClusters)); + } else { + fail("no response expected"); // failure should be returned, not SearchResponse + } + }, failure::set), latch); TaskId parentTaskId = new TaskId("n", 1); SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); @@ -763,10 +771,14 @@ public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { resolveWithEmptySearchResponse(tuple); } awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNotNull(failure.get()); - assertThat(failure.get(), 
instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); - assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + if (skipUnavailable) { + assertNull(failure.get()); + } else { + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } } } finally { @@ -781,7 +793,7 @@ public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); boolean local = randomBoolean(); OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; @@ -1035,7 +1047,7 @@ public void testCollectSearchShards() throws Exception { DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; Map remoteIndicesByCluster = new HashMap<>(); Settings.Builder builder = Settings.builder(); - MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder, false); Settings settings = builder.build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java index bfd626dd3d153..bb18420276190 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -145,6 +145,7 @@ public void testEnsureWeReconnect() throws Exception { Settings localSettings = Settings.builder() .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) .put("cluster.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()) + .put("cluster.remote.test.skip_unavailable", "false") // ensureConnected is only true for skip_unavailable=false .build(); try ( MockTransportService service = MockTransportService.createNewService( diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 29a5d5a34e37f..9f70ab879cb25 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -1282,7 +1282,7 @@ public void 
testSkipUnavailable() { service.start(); service.acceptIncomingRequests(); - assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1")); + assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1")); if (randomBoolean()) { updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", false); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java index c61dc93f962c6..be474b4a5d530 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterSettingsTests.java @@ -68,7 +68,7 @@ public void testRemoveRemoteClusterClientRole() { public void testSkipUnavailableDefault() { final String alias = randomAlphaOfLength(8); - assertFalse(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); + assertTrue(REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace(alias).get(Settings.EMPTY)); } public void testSeedsDefault() { diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index 3d644103dfb6f..2f3ece56b3281 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -320,14 +320,16 @@ public void testCrossClusterSearchWithApiKey() throws Exception { ) ); - // Check that authentication fails if we use a non-existent cross cluster access API key + // Check that authentication fails if we use a non-existent 
cross cluster access API key (when skip_unavailable=false) updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", "false") .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java index 3a7bc49340333..931d3b94669fb 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityEsqlIT.java @@ -499,12 +499,18 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { configureRemoteCluster(); populateData(); + final boolean skipUnavailable = randomBoolean(); + // avoids getting 404 errors updateClusterSettings( randomBoolean() - ? Settings.builder().put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)).build() + ? 
Settings.builder() + .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) + .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); @@ -520,8 +526,14 @@ public void testCrossClusterQueryAgainstInvalidRemote() throws Exception { var q2 = "FROM invalid_remote:employees | SORT emp_id DESC | LIMIT 10"; performRequestWithRemoteSearchUser(esqlRequest(q2)); }); - assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(error.getMessage(), containsString("unable to find apikey")); + + if (skipUnavailable == false) { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(error.getMessage(), containsString("unable to find apikey")); + } else { + assertThat(error.getResponse().getStatusLine().getStatusCode(), equalTo(500)); + assertThat(error.getMessage(), containsString("Unable to connect to [invalid_remote]")); + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java index a5ffeacf28112..793313e238651 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityFcActionAuthorizationIT.java @@ -47,6 +47,7 @@ import 
org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.RemoteConnectionInfo; import org.elasticsearch.xpack.ccr.action.repositories.ClearCcrRestoreSessionAction; @@ -82,6 +83,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class RemoteClusterSecurityFcActionAuthorizationIT extends ESRestTestCase { @@ -176,7 +178,9 @@ public void testIndicesPrivilegesAreEnforcedForCcrRestoreSessionActions() throws } // Simulate QC behaviours by directly connecting to the FC using a transport service - try (MockTransportService service = startTransport("node", threadPool, (String) crossClusterApiKeyMap.get("encoded"))) { + final String apiKey = (String) crossClusterApiKeyMap.get("encoded"); + final boolean skipUnavailable = randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final List remoteConnectionInfos = remoteClusterService.getRemoteConnectionInfos().toList(); assertThat(remoteConnectionInfos, hasSize(1)); @@ -328,28 +332,35 @@ public void testRestApiKeyIsNotAllowedOnRemoteClusterPort() throws IOException { final Response createApiKeyResponse = adminClient().performRequest(createApiKeyRequest); assertOK(createApiKeyResponse); final Map apiKeyMap = responseAsMap(createApiKeyResponse); - try (MockTransportService service = startTransport("node", threadPool, (String) apiKeyMap.get("encoded"))) { + final String apiKey = (String) apiKeyMap.get("encoded"); + final boolean skipUnavailable 
= randomBoolean(); + try (MockTransportService service = startTransport("node", threadPool, apiKey, skipUnavailable)) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); final var remoteClusterClient = remoteClusterService.getRemoteClusterClient( "my_remote_cluster", EsExecutors.DIRECT_EXECUTOR_SERVICE, RemoteClusterService.DisconnectedStrategy.RECONNECT_UNLESS_SKIP_UNAVAILABLE ); - - final ElasticsearchSecurityException e = expectThrows( - ElasticsearchSecurityException.class, + final Exception e = expectThrows( + Exception.class, () -> executeRemote( remoteClusterClient, RemoteClusterNodesAction.REMOTE_TYPE, RemoteClusterNodesAction.Request.REMOTE_CLUSTER_SERVER_NODES ) ); - assertThat( - e.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" - ) - ); + if (skipUnavailable) { + assertThat(e, instanceOf(ConnectTransportException.class)); + assertThat(e.getMessage(), containsString("Unable to connect to [my_remote_cluster]")); + } else { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat( + e.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + apiKeyMap.get("id") + "] has type [rest]" + ) + ); + } } } @@ -392,12 +403,14 @@ public void testUpdateCrossClusterApiKey() throws Exception { final FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().indices("index").fields("name"); // Perform cross-cluster requests + boolean skipUnavailable = randomBoolean(); try ( MockTransportService service = startTransport( "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo) + Map.of(TransportFieldCapabilitiesAction.NAME, crossClusterAccessSubjectInfo), + skipUnavailable ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ 
-508,7 +521,8 @@ public void testMalformedShardLevelActionIsRejected() throws Exception { "node", threadPool, (String) crossClusterApiKeyMap.get("encoded"), - Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)) + Map.of(TransportGetAction.TYPE.name() + "[s]", buildCrossClusterAccessSubjectInfo(indexA)), + randomBoolean() ) ) { final RemoteClusterService remoteClusterService = service.getRemoteClusterService(); @@ -552,15 +566,21 @@ private static CrossClusterAccessSubjectInfo buildCrossClusterAccessSubjectInfo( ); } - private static MockTransportService startTransport(final String nodeName, final ThreadPool threadPool, String encodedApiKey) { - return startTransport(nodeName, threadPool, encodedApiKey, Map.of()); + private static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + String encodedApiKey, + boolean skipUnavailable + ) { + return startTransport(nodeName, threadPool, encodedApiKey, Map.of(), skipUnavailable); } private static MockTransportService startTransport( final String nodeName, final ThreadPool threadPool, String encodedApiKey, - Map subjectInfoLookup + Map subjectInfoLookup, + boolean skipUnavailable ) { final String remoteClusterServerEndpoint = testCluster.getRemoteClusterServerEndpoint(0); @@ -573,9 +593,11 @@ private static MockTransportService startTransport( builder.setSecureSettings(secureSettings); if (randomBoolean()) { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.seeds", remoteClusterServerEndpoint); } else { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.my_remote_cluster.proxy_address", remoteClusterServerEndpoint); } diff --git 
a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java index 29afda08500ca..c791752e76de0 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java @@ -105,9 +105,11 @@ protected void configureRemoteCluster(boolean isProxyMode) throws Exception { final Settings.Builder builder = Settings.builder(); if (isProxyMode) { builder.put("cluster.remote.my_remote_cluster.mode", "proxy") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .put("cluster.remote.my_remote_cluster.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } else { builder.put("cluster.remote.my_remote_cluster.mode", "sniff") + .put("cluster.remote.my_remote_cluster.skip_unavailable", "false") .putList("cluster.remote.my_remote_cluster.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)); } updateClusterSettings(builder.build()); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java index d1e78d4f3ad39..c6bb6e10f0537 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java @@ -7,6 +7,7 
@@ package org.elasticsearch.xpack.remotecluster; +import org.apache.http.util.EntityUtils; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -331,66 +332,108 @@ public void testCrossClusterSearch() throws Exception { ) ); - // Check that authentication fails if we use a non-existent API key + // Check that authentication fails if we use a non-existent API key (when skip_unavailable=false) + boolean skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_remote.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_remote.mode", "proxy") + .put("cluster.remote.invalid_remote.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_remote.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception4 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) - ); - assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception4.getMessage(), containsString("unable to find apikey")); + if (skipUnavailable) { + /* + when skip_unavailable=true, response should be something like: + {"took":1,"timed_out":false,"num_reduce_phases":0,"_shards":{"total":0,"successful":0,"skipped":0,"failed":0}, + "_clusters":{"total":1,"successful":0,"skipped":1,"running":0,"partial":0,"failed":0, + "details":{"invalid_remote":{"status":"skipped","indices":"index1","timed_out":false, + "failures":[{"shard":-1,"index":null,"reason":{"type":"connect_transport_exception", + "reason":"Unable to connect to [invalid_remote]"}}]}}}, + 
"hits":{"total":{"value":0,"relation":"eq"},"max_score":null,"hits":[]}} + */ + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception4 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) + ); + assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception4.getMessage(), containsString("unable to find apikey")); + } - // check that REST API key is not supported by cross cluster access + // check that REST API key is not supported by cross cluster access (when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? 
Settings.builder() .put("cluster.remote.wrong_api_key_type.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.wrong_api_key_type.mode", "proxy") + .put("cluster.remote.wrong_api_key_type.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.wrong_api_key_type.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception5 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) - ); - assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat( - exception5.getMessage(), - containsString( - "authentication expected API key type of [cross_cluster], but API key [" - + REST_API_KEY_MAP_REF.get().get("id") - + "] has type [rest]" - ) - ); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception5 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/wrong_api_key_type:*/_search")) + ); + assertThat(exception5.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat( + exception5.getMessage(), + containsString( + "authentication expected API key type of [cross_cluster], but API key [" + + REST_API_KEY_MAP_REF.get().get("id") + + "] has type [rest]" + ) + ); + } - // Check invalid cross-cluster API key length is 
rejected + // Check invalid cross-cluster API key length is rejected (and gets security error when skip_unavailable=false) + skipUnavailable = randomBoolean(); updateClusterSettings( randomBoolean() ? Settings.builder() .put("cluster.remote.invalid_secret_length.seeds", fulfillingCluster.getRemoteClusterServerEndpoint(0)) + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .build() : Settings.builder() .put("cluster.remote.invalid_secret_length.mode", "proxy") + .put("cluster.remote.invalid_secret_length.skip_unavailable", Boolean.toString(skipUnavailable)) .put("cluster.remote.invalid_secret_length.proxy_address", fulfillingCluster.getRemoteClusterServerEndpoint(0)) .build() ); - final ResponseException exception6 = expectThrows( - ResponseException.class, - () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) - ); - assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + if (skipUnavailable) { + Response invalidRemoteResponse = performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")); + assertThat(invalidRemoteResponse.getStatusLine().getStatusCode(), equalTo(200)); + String responseJson = EntityUtils.toString(invalidRemoteResponse.getEntity()); + assertThat(responseJson, containsString("\"status\":\"skipped\"")); + assertThat(responseJson, containsString("connect_transport_exception")); + } else { + final ResponseException exception6 = expectThrows( + ResponseException.class, + () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_secret_length:*/_search")) + ); + assertThat(exception6.getResponse().getStatusLine().getStatusCode(), equalTo(401)); + assertThat(exception6.getMessage(), containsString("invalid cross-cluster API key value")); + } } } diff --git 
a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index 87db264356484..ca44d7fe6a85c 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ -45,6 +45,7 @@ def queryingCluster = testClusters.register('querying-cluster') { setting 'cluster.remote.connections_per_cluster', "1" user username: "test_user", password: "x-pack-test-password" + setting 'cluster.remote.my_remote_cluster.skip_unavailable', 'false' if (proxyMode) { setting 'cluster.remote.my_remote_cluster.mode', 'proxy' setting 'cluster.remote.my_remote_cluster.proxy_address', { From a4cfd02baba77e892adc18d5b5fdda42eba95d64 Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 16:57:56 -0600 Subject: [PATCH 035/244] Mute HashLookupOperatorTests testSimpleToString (#108047) See https://github.com/elastic/elasticsearch/issues/108045 --- .../compute/operator/AnyOperatorTestCase.java | 2 +- .../compute/operator/HashLookupOperatorTests.java | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index 25d79d0808741..9e0a6470e14c6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -68,7 +68,7 @@ public final void testSimpleDescription() { /** * Makes sure the description of {@link #simple} matches the {@link #expectedDescriptionOfSimple}. 
*/ - public final void testSimpleToString() { + public void testSimpleToString() { try (Operator operator = simple().get(driverContext())) { assertThat(operator.toString(), equalTo(expectedToStringOfSimple())); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java index 711800197aa03..31d3764ac67fc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashLookupOperatorTests.java @@ -88,4 +88,11 @@ protected String expectedDescriptionOfSimple() { protected String expectedToStringOfSimple() { return "HashLookup[keys=[foo], hash=PackedValuesBlockHash{groups=[0:LONG], entries=4, size=544b}, mapping=[0]]"; } + + @Override + // when you remove this AwaitsFix, also make this method in the superclass final again + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108045") + public void testSimpleToString() { + super.testSimpleToString(); + } } From 10139a30ed1a4caa86b543f14b8417594306daf5 Mon Sep 17 00:00:00 2001 From: Athena Brown Date: Mon, 29 Apr 2024 17:53:20 -0600 Subject: [PATCH 036/244] Refactor IDP plugin to use template registry (#107882) This started as removing references to `Version`, but the simplest way to do that turned out to be to just convert it to use template registries instead of this custom management. 
--- .../xpack/core/template/TemplateUtils.java | 100 ------------------ .../idp/saml-service-provider-template.json | 4 +- .../sp/SamlServiceProviderIndexTests.java | 59 ++++------- .../xpack/idp/IdentityProviderPlugin.java | 12 ++- .../idp/saml/sp/SamlServiceProviderIndex.java | 72 ++----------- ...lServiceProviderIndexTemplateRegistry.java | 56 ++++++++++ 6 files changed, 96 insertions(+), 207 deletions(-) create mode 100644 x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java index bae2d530a21a4..ba9639d3d5156 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/TemplateUtils.java @@ -6,28 +6,16 @@ */ package org.elasticsearch.xpack.core.template; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.template.resources.TemplateResources; -import java.io.IOException; import java.util.Collections; import java.util.Map; -import java.util.function.Predicate; - -import 
static org.elasticsearch.common.xcontent.XContentHelper.convertToMap; /** * Handling versioned templates for time-based indices in x-pack @@ -36,28 +24,6 @@ public class TemplateUtils { private TemplateUtils() {} - /** - * Loads a JSON template as a resource and puts it into the provided map - */ - public static void loadLegacyTemplateIntoMap( - String resource, - Map map, - String templateName, - String version, - String versionProperty, - Logger logger - ) { - final String template = loadTemplate(resource, version, versionProperty); - try ( - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, template) - ) { - map.put(templateName, IndexTemplateMetadata.Builder.fromXContent(parser, templateName)); - } catch (IOException e) { - // TODO: should we handle this with a thrown exception? - logger.error("Error loading template [{}] as part of metadata upgrading", templateName); - } - } - /** * Loads a built-in template and returns its source. */ @@ -126,70 +92,4 @@ public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String te return templateMetadata.version() != null && templateMetadata.version() >= currentVersion; } - - /** - * Checks if a versioned template exists, and if it exists checks if it is up-to-date with current version. 
- * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - */ - public static boolean checkTemplateExistsAndIsUpToDate(String templateName, String versionKey, ClusterState state, Logger logger) { - - return checkTemplateExistsAndVersionMatches(templateName, versionKey, state, logger, Version.CURRENT::equals); - } - - /** - * Checks if template with given name exists and if it matches the version predicate given - * @param versionKey The property in the mapping's _meta field which stores the version info - * @param templateName Name of the index template - * @param state Cluster state - * @param logger Logger - * @param predicate Predicate to execute on version check - */ - public static boolean checkTemplateExistsAndVersionMatches( - String templateName, - String versionKey, - ClusterState state, - Logger logger, - Predicate predicate - ) { - - IndexTemplateMetadata templateMeta = state.metadata().templates().get(templateName); - if (templateMeta == null) { - return false; - } - CompressedXContent mappings = templateMeta.getMappings(); - - // check all mappings contain correct version in _meta - // we have to parse the source here which is annoying - if (mappings != null) { - try { - Map typeMappingMap = convertToMap(mappings.uncompressed(), false, XContentType.JSON).v2(); - // should always contain one entry with key = typename - assert (typeMappingMap.size() == 1); - String key = typeMappingMap.keySet().iterator().next(); - // get the actual mapping entries - @SuppressWarnings("unchecked") - Map mappingMap = (Map) typeMappingMap.get(key); - if (containsCorrectVersion(versionKey, mappingMap, predicate) == false) { - return false; - } - } catch (ElasticsearchParseException e) { - logger.error(() -> "Cannot parse the template [" + templateName + "]", e); - throw new IllegalStateException("Cannot parse the template " + 
templateName, e); - } - } - return true; - } - - private static boolean containsCorrectVersion(String versionKey, Map typeMappingMap, Predicate predicate) { - @SuppressWarnings("unchecked") - Map meta = (Map) typeMappingMap.get("_meta"); - if (meta == null) { - // pre 5.0, cannot be up to date - return false; - } - return predicate.test(Version.fromString((String) meta.get(versionKey))); - } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json index dd69b9cecefc5..0e82cc0f2a6df 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json @@ -13,10 +13,12 @@ "index.priority": 10, "index.format": 1 }, + "version": ${idp.template.version}, "mappings": { "_doc": { "_meta": { - "idp-version": "${idp.template.version}" + "idp-version": "${idp.template.version_deprecated}", + "idp-template-version": "${idp.template.version}" }, "dynamic": "strict", "properties": { diff --git a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java index 10d636c0cf851..39b04d8915b89 100644 --- a/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java +++ b/x-pack/plugin/identity-provider/src/internalClusterTest/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; -import 
org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; @@ -30,7 +29,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -53,7 +51,7 @@ public class SamlServiceProviderIndexTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class); + return List.of(LocalStateCompositeXPackPlugin.class, IdentityProviderPlugin.class, IndexTemplateRegistryPlugin.class); } @Override @@ -82,11 +80,6 @@ public void testWriteAndFindServiceProvidersFromIndex() { final int count = randomIntBetween(3, 5); List documents = new ArrayList<>(count); - // Install the template - assertTrue("Template should have been installed", installTemplate()); - // No need to install it again - assertFalse("Template should not have been installed a second time", installTemplate()); - // Index should not exist yet assertThat(clusterService.state().metadata().index(SamlServiceProviderIndex.INDEX_NAME), nullValue()); @@ -128,7 +121,6 @@ public void testWriteAndFindServiceProvidersFromIndex() { } public void testWritesViaAliasIfItExists() { - assertTrue(installTemplate()); // Create an index that will trigger the template, but isn't the standard index name final String customIndexName = SamlServiceProviderIndex.INDEX_NAME + "-test"; @@ -155,38 +147,6 @@ public void testWritesViaAliasIfItExists() { assertThat(readDocument(document.docId), equalTo(document)); } - public void testInstallTemplateAutomaticallyOnClusterChange() throws Exception { - // Create an index that will trigger a cluster state change - final String indexName = randomAlphaOfLength(7).toLowerCase(Locale.ROOT); - 
indicesAdmin().create(new CreateIndexRequest(indexName)).actionGet(); - - ensureGreen(indexName); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - - assertBusy(() -> assertThat("template should have been installed", templateMeta, notNullValue())); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - public void testInstallTemplateAutomaticallyOnDocumentWrite() { - final SamlServiceProviderDocument doc = randomDocument(1); - writeDocument(doc); - - assertThat(readDocument(doc.docId), equalTo(doc)); - - IndexTemplateMetadata templateMeta = clusterService.state().metadata().templates().get(SamlServiceProviderIndex.TEMPLATE_NAME); - assertThat("template should have been installed", templateMeta, notNullValue()); - - assertFalse("Template is already installed, should not install again", installTemplate()); - } - - private boolean installTemplate() { - final PlainActionFuture installTemplate = new PlainActionFuture<>(); - serviceProviderIndex.installIndexTemplate(assertListenerIsOnlyCalledOnce(installTemplate)); - return installTemplate.actionGet(); - } - private Set getAllDocs() { final PlainActionFuture> future = new PlainActionFuture<>(); serviceProviderIndex.findAll( @@ -264,4 +224,21 @@ private static ActionListener assertListenerIsOnlyCalledOnce(ActionListen }); } + // Since we just want to test the template handling in this test suite, we don't need to go through + // all the hassle of the setup required to *actually* enable the plugin (we do that elsewhere), we + // just need to make sure the template registry is here. 
+ public static class IndexTemplateRegistryPlugin extends Plugin { + @Override + public Collection createComponents(PluginServices services) { + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + return List.of(indexTemplateRegistry); + } + } } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java index e493c8e61ca58..5e6bc5f703879 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.idp.saml.rest.action.RestSamlValidateAuthenticationRequestAction; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderFactory; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex; +import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndexTemplateRegistry; import org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderResolver; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderCacheSettings; import org.elasticsearch.xpack.idp.saml.sp.ServiceProviderDefaults; @@ -80,6 +81,15 @@ public Collection createComponents(PluginServices services) { return List.of(); } + var indexTemplateRegistry = new SamlServiceProviderIndexTemplateRegistry( + services.environment().settings(), + services.clusterService(), + services.threadPool(), + services.client(), + services.xContentRegistry() + ); + indexTemplateRegistry.initialize(); + SamlInit.initialize(); final SamlServiceProviderIndex index = new SamlServiceProviderIndex(services.client(), services.clusterService()); final 
SecurityContext securityContext = new SecurityContext(settings, services.threadPool().getThreadContext()); @@ -111,7 +121,7 @@ public Collection createComponents(PluginServices services) { final SamlFactory factory = new SamlFactory(); - return List.of(index, idp, factory, userPrivilegeResolver); + return List.of(index, idp, factory, userPrivilegeResolver, indexTemplateRegistry); } @Override diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java index bd425487b9ad0..1eb6c5586a48b 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndex.java @@ -9,12 +9,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; @@ -35,7 +33,6 @@ import org.elasticsearch.common.util.CachedSupplier; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilder; @@ -46,7 +43,6 @@ import org.elasticsearch.xcontent.XContentParser; import 
org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.template.TemplateUtils; import java.io.ByteArrayOutputStream; import java.io.Closeable; @@ -70,15 +66,19 @@ public class SamlServiceProviderIndex implements Closeable { private final ClusterService clusterService; private final ClusterStateListener clusterStateListener; private volatile boolean aliasExists; - private volatile boolean templateInstalled; public static final String ALIAS_NAME = "saml-service-provider"; public static final String INDEX_NAME = "saml-service-provider-v1"; static final String TEMPLATE_NAME = ALIAS_NAME; - private static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; - private static final String TEMPLATE_META_VERSION_KEY = "idp-version"; - private static final String TEMPLATE_VERSION_SUBSTITUTE = "idp.template.version"; + static final String TEMPLATE_RESOURCE = "/idp/saml-service-provider-template.json"; + static final String TEMPLATE_VERSION_VARIABLE = "idp.template.version"; + + // This field is only populated with an old-school version string for BWC purposes + static final String TEMPLATE_VERSION_STRING_DEPRECATED = "idp.template.version_deprecated"; + static final String FINAL_TEMPLATE_VERSION_STRING_DEPRECATED = "8.14.0"; + + static final int CURRENT_TEMPLATE_VERSION = 1; public static final class DocumentVersion { public final String id; @@ -140,34 +140,9 @@ public SamlServiceProviderIndex(Client client, ClusterService clusterService) { private void clusterChanged(ClusterChangedEvent clusterChangedEvent) { final ClusterState state = clusterChangedEvent.state(); - installTemplateIfRequired(state); checkForAliasStateChange(state); } - private void installTemplateIfRequired(ClusterState state) { - if (templateInstalled) { - return; - } - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - return; - } - if (isTemplateUpToDate(state)) { - 
templateInstalled = true; - return; - } - if (state.nodes().isLocalNodeElectedMaster() == false) { - return; - } - installIndexTemplate(ActionListener.wrap(installed -> { - templateInstalled = true; - if (installed) { - logger.debug("Template [{}] has been updated", TEMPLATE_NAME); - } else { - logger.debug("Template [{}] appears to be up to date", TEMPLATE_NAME); - } - }, e -> logger.warn(() -> "Failed to install template [" + TEMPLATE_NAME + "]", e))); - } - private void checkForAliasStateChange(ClusterState state) { final IndexAbstraction aliasInfo = state.getMetadata().getIndicesLookup().get(ALIAS_NAME); final boolean previousState = aliasExists; @@ -199,24 +174,6 @@ private void logChangedAliasState(IndexAbstraction aliasInfo) { } } - public void installIndexTemplate(ActionListener listener) { - final ClusterState state = clusterService.state(); - if (isTemplateUpToDate(state)) { - listener.onResponse(false); - return; - } - final String template = TemplateUtils.loadTemplate(TEMPLATE_RESOURCE, Version.CURRENT.toString(), TEMPLATE_VERSION_SUBSTITUTE); - final PutIndexTemplateRequest request = new PutIndexTemplateRequest(TEMPLATE_NAME).source(template, XContentType.JSON); - client.admin().indices().putTemplate(request, listener.delegateFailureAndWrap((l, response) -> { - logger.info("Installed template [{}]", TEMPLATE_NAME); - l.onResponse(true); - })); - } - - private boolean isTemplateUpToDate(ClusterState state) { - return TemplateUtils.checkTemplateExistsAndIsUpToDate(TEMPLATE_NAME, TEMPLATE_META_VERSION_KEY, state, logger); - } - public void deleteDocument(DocumentVersion version, WriteRequest.RefreshPolicy refreshPolicy, ActionListener listener) { final DeleteRequest request = new DeleteRequest(aliasExists ? 
ALIAS_NAME : INDEX_NAME).id(version.id) .setIfSeqNo(version.seqNo) @@ -240,19 +197,6 @@ public void writeDocument( return; } - if (templateInstalled) { - _writeDocument(document, opType, refreshPolicy, listener); - } else { - installIndexTemplate(listener.delegateFailureAndWrap((l, installed) -> _writeDocument(document, opType, refreshPolicy, l))); - } - } - - private void _writeDocument( - SamlServiceProviderDocument document, - DocWriteRequest.OpType opType, - WriteRequest.RefreshPolicy refreshPolicy, - ActionListener listener - ) { try ( ByteArrayOutputStream out = new ByteArrayOutputStream(); XContentBuilder xContentBuilder = new XContentBuilder(XContentType.JSON.xContent(), out) diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java new file mode 100644 index 0000000000000..bd6bdbabbd4f2 --- /dev/null +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderIndexTemplateRegistry.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.idp.saml.sp; + +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xpack.core.template.IndexTemplateConfig; +import org.elasticsearch.xpack.core.template.IndexTemplateRegistry; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.CURRENT_TEMPLATE_VERSION; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.FINAL_TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_NAME; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_RESOURCE; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_STRING_DEPRECATED; +import static org.elasticsearch.xpack.idp.saml.sp.SamlServiceProviderIndex.TEMPLATE_VERSION_VARIABLE; + +public class SamlServiceProviderIndexTemplateRegistry extends IndexTemplateRegistry { + public SamlServiceProviderIndexTemplateRegistry( + Settings nodeSettings, + ClusterService clusterService, + ThreadPool threadPool, + Client client, + NamedXContentRegistry xContentRegistry + ) { + super(nodeSettings, clusterService, threadPool, client, xContentRegistry); + } + + @Override + protected String getOrigin() { + return "idp"; + } + + @Override + protected List getLegacyTemplateConfigs() { + return List.of( + new IndexTemplateConfig( + TEMPLATE_NAME, + TEMPLATE_RESOURCE, + CURRENT_TEMPLATE_VERSION, + TEMPLATE_VERSION_VARIABLE, + Map.of(TEMPLATE_VERSION_STRING_DEPRECATED, FINAL_TEMPLATE_VERSION_STRING_DEPRECATED) + ) + ); + } +} From 1c6119fa91327fb0db791f39015d6b89b132fa10 Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Tue, 30 Apr 2024 08:29:34 +0200 
Subject: [PATCH 037/244] Introduce skipping on known_issues and awaits_fix for YAML tests (#107836) See README on YAML REST tests for detailed instructions. --- .../yaml/section/PrerequisiteSection.java | 302 ++++++++++-------- .../test/rest/yaml/section/Prerequisites.java | 5 + .../section/ClientYamlTestSectionTests.java | 2 +- .../section/ClientYamlTestSuiteTests.java | 30 ++ .../section/PrerequisiteSectionTests.java | 90 +++++- 5 files changed, 295 insertions(+), 134 deletions(-) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java index f4c9aaa619911..1ee447da1f111 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.Features; import org.elasticsearch.xcontent.XContentLocation; @@ -20,9 +21,13 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.function.Consumer; import java.util.function.Predicate; +import static java.util.Collections.emptyList; + /** * Represents a section where prerequisites to run a specific test section or suite are specified. 
It is possible to specify preconditions * as a set of `skip` criteria (the test or suite will be skipped if the specified conditions are met) or `requires` criteria (the test or @@ -34,13 +39,18 @@ * - an operating system (full name, including specific Linux distributions) - some OS might show a certain behavior */ public class PrerequisiteSection { + record KnownIssue(String clusterFeature, String fixedBy) { + private static final Set FIELD_NAMES = Set.of("cluster_feature", "fixed_by"); + } + static class PrerequisiteSectionBuilder { String skipVersionRange = null; String skipReason = null; String requiresReason = null; List requiredYamlRunnerFeatures = new ArrayList<>(); List skipOperatingSystems = new ArrayList<>(); - + List skipKnownIssues = new ArrayList<>(); + String skipAwaitsFix = null; Set skipClusterFeatures = new HashSet<>(); Set requiredClusterFeatures = new HashSet<>(); @@ -53,6 +63,11 @@ enum XPackRequired { XPackRequired xpackRequired = XPackRequired.NOT_SPECIFIED; + public PrerequisiteSectionBuilder skipIfAwaitsFix(String bugUrl) { + this.skipAwaitsFix = bugUrl; + return this; + } + public PrerequisiteSectionBuilder skipIfVersion(String skipVersionRange) { this.skipVersionRange = skipVersionRange; return this; @@ -96,6 +111,11 @@ public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { return this; } + public PrerequisiteSectionBuilder skipKnownIssue(KnownIssue knownIssue) { + skipKnownIssues.add(knownIssue); + return this; + } + public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { requiredClusterFeatures.add(featureName); return this; @@ -107,29 +127,30 @@ public PrerequisiteSectionBuilder skipIfOs(String osName) { } void validate(XContentLocation contentLocation) { - if ((Strings.hasLength(skipVersionRange) == false) + if ((Strings.isEmpty(skipVersionRange)) && requiredYamlRunnerFeatures.isEmpty() && skipOperatingSystems.isEmpty() && xpackRequired == XPackRequired.NOT_SPECIFIED && 
requiredClusterFeatures.isEmpty() - && skipClusterFeatures.isEmpty()) { - throw new ParsingException( - contentLocation, - "at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section" - ); + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty() + && Strings.isEmpty(skipAwaitsFix)) { + // TODO separate the validation for requires / skip when dropping parsing of legacy fields, e.g. features in skip + throw new ParsingException(contentLocation, "at least one predicate is mandatory within a skip or requires section"); } - if (Strings.hasLength(skipVersionRange) && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); - } - if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); - } - if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + + if (Strings.isEmpty(skipReason) + && (Strings.isEmpty(skipVersionRange) + && skipOperatingSystems.isEmpty() + && skipClusterFeatures.isEmpty() + && skipKnownIssues.isEmpty()) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within this skip section"); } - if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + + if (Strings.isEmpty(requiresReason) && (requiredClusterFeatures.isEmpty() == false)) { + throw new ParsingException(contentLocation, "reason is mandatory within this requires section"); } + // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os if 
(skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); @@ -143,33 +164,49 @@ void validate(XContentLocation contentLocation) { } public PrerequisiteSection build() { - final List> skipCriteriaList = new ArrayList<>(); - final List> requiresCriteriaList; - - // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this - // section. if (Features.areAllSupported(requiredYamlRunnerFeatures) == false) { - requiresCriteriaList = List.of(Prerequisites.FALSE); - } else { - requiresCriteriaList = new ArrayList<>(); - if (xpackRequired == XPackRequired.YES) { - requiresCriteriaList.add(Prerequisites.hasXPack()); - } - if (xpackRequired == XPackRequired.NO) { - skipCriteriaList.add(Prerequisites.hasXPack()); - } - if (Strings.hasLength(skipVersionRange)) { - skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange)); - } - if (skipOperatingSystems.isEmpty() == false) { - skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); - } - if (requiredClusterFeatures.isEmpty() == false) { - requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); - } - if (skipClusterFeatures.isEmpty() == false) { - skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); - } + // always skip this section due to missing required test runner features (see {@link Features}) + return new PrerequisiteSection( + emptyList(), + skipReason, + List.of(Prerequisites.FALSE), + requiresReason, + requiredYamlRunnerFeatures + ); + } + if (Strings.hasLength(skipAwaitsFix)) { + // always skip this section due to a pending fix + return new PrerequisiteSection( + List.of(Prerequisites.TRUE), + skipReason, + emptyList(), + requiresReason, + requiredYamlRunnerFeatures + ); + } + + final List> skipCriteriaList = new 
ArrayList<>(); + final List> requiresCriteriaList = new ArrayList<>(); + if (xpackRequired == XPackRequired.YES) { + requiresCriteriaList.add(Prerequisites.hasXPack()); + } + if (xpackRequired == XPackRequired.NO) { + skipCriteriaList.add(Prerequisites.hasXPack()); + } + if (Strings.hasLength(skipVersionRange)) { + skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange)); + } + if (skipOperatingSystems.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); + } + if (requiredClusterFeatures.isEmpty() == false) { + requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); + } + if (skipClusterFeatures.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); + } + if (skipKnownIssues.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnKnownIssue(skipKnownIssues)); } return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures); } @@ -228,97 +265,106 @@ private static void parseFeatureField(String feature, PrerequisiteSectionBuilder // package private for tests static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the skip section is not properly indented" - ); - } - String currentFieldName = null; - XContentParser.Token token; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("version".equals(currentFieldName)) { - builder.skipIfVersion(parser.text()); - } else if ("reason".equals(currentFieldName)) { - builder.setSkipReason(parser.text()); - } 
else if ("features".equals(currentFieldName)) { - parseFeatureField(parser.text(), builder); - } else if ("os".equals(currentFieldName)) { - builder.skipIfOs(parser.text()); - } else if ("cluster_features".equals(currentFieldName)) { - builder.skipIfClusterFeature(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within skip section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeatureField(parser.text(), builder); - } - } else if ("os".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfOs(parser.text()); - } - } else if ("cluster_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.skipIfClusterFeature(parser.text()); - } - } + requireStartObject("skip", parser.nextToken()); + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue; + + boolean valid = false; + if (parser.currentToken().isValue()) { + valid = switch (parser.currentName()) { + case "version" -> parseString(parser, builder::skipIfVersion); + case "reason" -> parseString(parser, builder::setSkipReason); + case "features" -> parseString(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseString(parser, builder::skipIfOs); + case "cluster_features" -> parseString(parser, builder::skipIfClusterFeature); + case "awaits_fix" -> parseString(parser, builder::skipIfAwaitsFix); + default -> false; + }; + } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + valid = switch (parser.currentName()) { + case "features" -> parseStrings(parser, f -> parseFeatureField(f, builder)); + case "os" -> parseStrings(parser, builder::skipIfOs); + case "cluster_features" -> 
parseStrings(parser, builder::skipIfClusterFeature); + case "known_issues" -> parseArray(parser, PrerequisiteSection::parseKnownIssue, builder::skipKnownIssue); + default -> false; + }; } + if (valid == false) throwUnexpectedField("skip", parser); } parser.nextToken(); } - static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + private static void throwUnexpectedField(String section, XContentParser parser) throws IOException { + throw new ParsingException( + parser.getTokenLocation(), + Strings.format("field [%s] of type [%s] not supported within %s section", parser.currentName(), parser.currentToken(), section) + ); + } + + private static void requireStartObject(String section, XContentParser.Token token) throws IOException { + if (token != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the requires section is not properly indented" + Strings.format( + "Expected [%s], found [%s], the %s section is not properly indented", + XContentParser.Token.START_OBJECT, + token, + section + ) + ); + } + } + + private static boolean parseString(XContentParser parser, Consumer consumer) throws IOException { + consumer.accept(parser.text()); + return true; + } + + private static boolean parseStrings(XContentParser parser, Consumer consumer) throws IOException { + return parseArray(parser, XContentParser::text, consumer); + } + + private static boolean parseArray(XContentParser parser, CheckedFunction item, Consumer consumer) + throws IOException { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + consumer.accept(item.apply(parser)); + } + return true; + } + + private static KnownIssue parseKnownIssue(XContentParser parser) throws IOException { + Map fields = parser.mapStrings(); + if 
(fields.keySet().equals(KnownIssue.FIELD_NAMES) == false) { + throw new ParsingException( + parser.getTokenLocation(), + Strings.format("Expected fields %s, but got %s", KnownIssue.FIELD_NAMES, fields.keySet()) ); } - String currentFieldName = null; - XContentParser.Token token; - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("reason".equals(currentFieldName)) { - builder.setRequiresReason(parser.text()); - } else if ("test_runner_features".equals(currentFieldName)) { - parseFeatureField(parser.text(), builder); - } else if ("cluster_features".equals(currentFieldName)) { - builder.requireClusterFeature(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within requires section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("test_runner_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeatureField(parser.text(), builder); - } - } else if ("cluster_features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.requireClusterFeature(parser.text()); - } - } + return new KnownIssue(fields.get("cluster_feature"), fields.get("fixed_by")); + } + + static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + requireStartObject("requires", parser.nextToken()); + + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) continue; + + boolean valid = false; + if (parser.currentToken().isValue()) { + valid = switch (parser.currentName()) { + case "reason" -> parseString(parser, builder::setRequiresReason); + case "test_runner_features" -> parseString(parser, f -> 
parseFeatureField(f, builder)); + case "cluster_features" -> parseString(parser, builder::requireClusterFeature); + default -> false; + }; + } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + valid = switch (parser.currentName()) { + case "test_runner_features" -> parseStrings(parser, f -> parseFeatureField(f, builder)); + case "cluster_features" -> parseStrings(parser, builder::requireClusterFeature); + default -> false; + }; } + if (valid == false) throwUnexpectedField("requires", parser); } parser.nextToken(); } @@ -332,9 +378,9 @@ static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuild final String requireReason; private PrerequisiteSection() { - this.skipCriteriaList = new ArrayList<>(); - this.requiresCriteriaList = new ArrayList<>(); - this.yamlRunnerFeatures = new ArrayList<>(); + this.skipCriteriaList = emptyList(); + this.requiresCriteriaList = emptyList(); + this.yamlRunnerFeatures = emptyList(); this.skipReason = null; this.requireReason = null; } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java index 8049c227b199e..ca10101a4612c 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java @@ -44,4 +44,9 @@ static Predicate requireClusterFeatures(Set skipOnClusterFeatures(Set clusterFeatures) { return context -> clusterFeatures.stream().anyMatch(context::clusterHasFeature); } + + static Predicate skipOnKnownIssue(List knownIssues) { + return context -> knownIssues.stream() + .anyMatch(i -> context.clusterHasFeature(i.clusterFeature()) && context.clusterHasFeature(i.fixedBy()) == false); + } } diff --git 
a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 2c6e7e30e0d46..108a85b978af3 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -33,7 +33,7 @@ public void testWrongIndentation() throws Exception { assertEquals("Error parsing test named [First test section]", e.getMessage()); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); assertEquals( - "Expected [START_OBJECT, found [VALUE_NULL], the skip section is not properly indented", + "Expected [START_OBJECT], found [VALUE_NULL], the skip section is not properly indented", e.getCause().getMessage() ); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 1f5bdc71dde37..f8927f76c07ec 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -468,11 +468,41 @@ public void testParseSkipOs() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true)); } + public void testMuteUsingAwaitsFix() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + "Mute": + + - skip: + awaits_fix: bugurl + + - do: + indices.get_mapping: + index: test_index + type: test_type + + - match: {test_type.properties.text.type: string} + - match: {test_type.properties.text.analyzer: whitespace} + """); + + 
ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Mute")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + } + public void testParseSkipAndRequireClusterFeatures() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ "Broken on some os": - skip: + known_issues: + - cluster_feature: buggy_feature + fixed_by: buggy_feature_fix cluster_features: [unsupported-feature1, unsupported-feature2] reason: "unsupported-features are not supported" - requires: diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java index 181ec34fefb7e..a77b2cc5b40f1 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.section.PrerequisiteSection.KnownIssue; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.yaml.YamlXContent; import org.junit.AssumptionViolatedException; @@ -34,6 +35,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static 
org.hamcrest.Matchers.oneOf; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -151,6 +153,11 @@ public void testSkipTestFeaturesOverridesAnySkipCriteria() { assertFalse(section.requiresCriteriaMet(mockContext)); } + public void testSkipAwaitsFix() { + PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfAwaitsFix("bugurl").build(); + assertTrue(section.skipCriteriaMet(mock(ClientYamlTestExecutionContext.class))); + } + public void testSkipOs() { PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfOs("windows95") .skipIfOs("debian-5") @@ -306,6 +313,57 @@ public void testParseSkipSectionBothFeatureAndVersion() throws Exception { assertThat(skipSectionBuilder.skipReason, equalTo("Delete ignores the parent param")); } + public void testParseSkipSectionAwaitsFix() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + awaits_fix: "bugurl" + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder.skipAwaitsFix, is("bugurl")); + } + + public void testParseSkipSectionKnownIssues() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + reason: "skip known bugs" + known_issues: + - cluster_feature: feature1 + fixed_by: featureFix1 + - cluster_feature: feature2 + fixed_by: featureFix2"""); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder.skipReason, is("skip known bugs")); + assertThat( + skipSectionBuilder.skipKnownIssues, + contains( + new KnownIssue("feature1", "featureFix1"), // + new KnownIssue("feature2", "featureFix2") + ) + ); + } + + public void testParseSkipSectionIncompleteKnownIssues() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + reason: "skip known bugs" + known_issues: + - cluster_feature: feature1"""); + + Exception e = 
expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + parser = null; // parser is not fully consumed, prevent validation + assertThat( + e.getMessage(), + is( + oneOf( + ("Expected fields [cluster_feature, fixed_by], but got [cluster_feature]"), + ("Expected fields [fixed_by, cluster_feature], but got [cluster_feature]") + ) + ) + ); + } + public void testParseSkipSectionNoReason() throws Exception { parser = createParser(YamlXContent.yamlXContent, """ skip: @@ -313,7 +371,7 @@ public void testParseSkipSectionNoReason() throws Exception { """); Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); - assertThat(e.getMessage(), is("reason is mandatory within skip version section")); + assertThat(e.getMessage(), is("reason is mandatory within this skip section")); } public void testParseSkipSectionNoVersionNorFeature() throws Exception { @@ -323,10 +381,7 @@ public void testParseSkipSectionNoVersionNorFeature() throws Exception { """); Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); - assertThat( - e.getMessage(), - is("at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section") - ); + assertThat(e.getMessage(), is("at least one predicate is mandatory within a skip or requires section")); } public void testParseSkipSectionOsNoVersion() throws Exception { @@ -579,6 +634,31 @@ public void testSkipClusterFeaturesAllRequiredNoneToSkipMatch() { assertTrue(section.requiresCriteriaMet(mockContext)); } + public void testSkipKnownIssue() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnKnownIssue(List.of(new KnownIssue("bug1", "fix1"), new KnownIssue("bug2", "fix2")))), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + assertFalse(section.skipCriteriaMet(mockContext)); + + 
when(mockContext.clusterHasFeature("bug1")).thenReturn(true); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("fix1")).thenReturn(true); + assertFalse(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("bug2")).thenReturn(true); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.clusterHasFeature("fix2")).thenReturn(true); + assertFalse(section.skipCriteriaMet(mockContext)); + } + public void evaluateEmpty() { var section = new PrerequisiteSection(List.of(), "unsupported", List.of(), "required", List.of()); From 61a3415ad6afe44c3540a2946dfdf6767742502c Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Tue, 30 Apr 2024 08:35:48 +0200 Subject: [PATCH 038/244] Add Range*Handler java docs (#105843) --- .../shared/SharedBlobCacheService.java | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index bd67e71eac041..be93bcf9945eb 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -1149,13 +1149,33 @@ public CacheFile getCacheFile(KeyType cacheKey, long length) { @FunctionalInterface public interface RangeAvailableHandler { - // caller that wants to read from x should instead do a positional read from x + relativePos - // caller should also only read up to length, further bytes will be offered by another call to this method + /** + * Callback method used to read data from the cache. The target is typically captured by the callback implementation. 
+ * + * A caller should only read up to length, further bytes will be offered by another call to this method + * + * @param channel is the cache region to read from + * @param channelPos a position in the channel (cache file) to read from + * @param relativePos a position in the target buffer to store bytes and pass to the caller + * @param length of the blob that can be read (must not be exceeded) + * @return number of bytes read + * @throws IOException on failure + */ int onRangeAvailable(SharedBytes.IO channel, int channelPos, int relativePos, int length) throws IOException; } @FunctionalInterface public interface RangeMissingHandler { + /** + * Callback method used to fetch data (usually from a remote storage) and write it in the cache. + * + * @param channel is the cache region to write to + * @param channelPos a position in the channel (cache file) to write to + * @param relativePos the relative position in the remote storage to read from + * @param length of data to fetch + * @param progressUpdater consumer to invoke with the number of copied bytes as they are written in cache. + * This is used to notify waiting readers that data become available in cache. + */ void fillCacheRange(SharedBytes.IO channel, int channelPos, int relativePos, int length, IntConsumer progressUpdater) throws IOException; } From a2d9cc6473b7c433243391b35a634aca6a7c62af Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 30 Apr 2024 07:39:39 +0100 Subject: [PATCH 039/244] Encapsulate `MasterNodeRequest#masterNodeTimeout` (#107999) There's no good reason for this field to have `protected` visibility, and we definitely don't want subclasses to be able to set it to `null`. This commit makes it `private`. 
Relates #107984 --- .../cluster/reroute/ClusterRerouteRequest.java | 4 ++-- .../snapshots/create/CreateSnapshotRequest.java | 4 ++-- .../admin/cluster/state/ClusterStateRequest.java | 2 +- .../indices/settings/put/UpdateSettingsRequest.java | 12 ++++++++++-- .../action/support/master/MasterNodeRequest.java | 13 +++++++++++-- .../UpdateIndexShardSnapshotStatusRequest.java | 3 +-- .../xpack/core/ccr/action/CcrStatsAction.java | 6 +++--- .../MountSearchableSnapshotRequest.java | 4 ++-- 8 files changed, 32 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index 5aeef6b19298e..b355d3c50400e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -131,12 +131,12 @@ public boolean equals(Object obj) { && Objects.equals(explain, other.explain) && Objects.equals(ackTimeout(), other.ackTimeout()) && Objects.equals(retryFailed, other.retryFailed) - && Objects.equals(masterNodeTimeout, other.masterNodeTimeout); + && Objects.equals(masterNodeTimeout(), other.masterNodeTimeout()); } @Override public int hashCode() { // Override equals and hashCode for testing - return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout); + return Objects.hash(commands, dryRun, explain, ackTimeout(), retryFailed, masterNodeTimeout()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index 2e8a28d412e26..9127092bdb13a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -461,7 +461,7 @@ public boolean equals(Object o) { && Arrays.equals(indices, that.indices) && Objects.equals(indicesOptions, that.indicesOptions) && Arrays.equals(featureStates, that.featureStates) - && Objects.equals(masterNodeTimeout, that.masterNodeTimeout) + && Objects.equals(masterNodeTimeout(), that.masterNodeTimeout()) && Objects.equals(userMetadata, that.userMetadata); } @@ -495,7 +495,7 @@ public String toString() { + ", waitForCompletion=" + waitForCompletion + ", masterNodeTimeout=" - + masterNodeTimeout + + masterNodeTimeout() + ", metadata=" + userMetadata + '}'; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java index e9de49dcbf5b4..d29996711d722 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequest.java @@ -222,7 +222,7 @@ public String getDescription() { if (indices.length > 0) { stringBuilder.append("indices ").append(Arrays.toString(indices)).append(", "); } - stringBuilder.append("master timeout [").append(masterNodeTimeout).append("]]"); + stringBuilder.append("master timeout [").append(masterNodeTimeout()).append("]]"); return stringBuilder.toString(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 666419edc1bf0..7fa2e11317a43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -254,7 +254,7 @@ public boolean 
equals(Object o) { return false; } UpdateSettingsRequest that = (UpdateSettingsRequest) o; - return masterNodeTimeout.equals(that.masterNodeTimeout) + return masterNodeTimeout().equals(that.masterNodeTimeout()) && ackTimeout().equals(that.ackTimeout()) && Objects.equals(settings, that.settings) && Objects.equals(indicesOptions, that.indicesOptions) @@ -265,7 +265,15 @@ && ackTimeout().equals(that.ackTimeout()) @Override public int hashCode() { - return Objects.hash(masterNodeTimeout, ackTimeout(), settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices)); + return Objects.hash( + masterNodeTimeout(), + ackTimeout(), + settings, + indicesOptions, + preserveExisting, + reopen, + Arrays.hashCode(indices) + ); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 6459f6c1b458a..063dbb0397de8 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.TimeValue; import java.io.IOException; +import java.util.Objects; /** * A based request for master based operation. @@ -22,10 +23,18 @@ public abstract class MasterNodeRequest Date: Tue, 30 Apr 2024 10:22:26 +0200 Subject: [PATCH 040/244] Handle parallel calls to createWeight when profiling is on (#108041) We disable inter-segment concurrency in the query phase whenever profile is on, because there are known concurrency issues that need fixing. The way we disable concurrency is by creating a single slice that search will execute against. We still offload the execution to the search workers thread pool. Inter-segment concurrency in Lucene is though not always based on slices. 
Knn query (as well as terms enum loading and other places) parallelizes across all segments independently of slices that group multiple segments together. That behavior is not easy to disable unless you don't set the executor to the searcher, in which case though you entirely disable using the separate executor for potentially heavy CPU/IO based loads which is not desirable. That means that when executing a knn query, it will execute in parallel (in DFS as well as in the query phase) no matter if inter-segment concurrency has been disabled because profiling is on. When using pre-filtering, there are queries like multi term queries that will call createWeight from each segment, in parallel, when pulling the scorer. That causes non-deterministic behavior as the profiler does not support concurrent access to some of its data structures. This commit protects the profiler from concurrent access to its data structures by synchronizing access to its tree. Performance is not a concern here, as profiler is already known to slow down query execution. 
Closes #104235 Closes #104131 --- docs/changelog/108041.yaml | 7 ++++++ .../search/profile/dfs/DfsProfilerIT.java | 1 - .../profile/AbstractInternalProfileTree.java | 24 +++++++------------ .../profile/AbstractProfileBreakdown.java | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) create mode 100644 docs/changelog/108041.yaml diff --git a/docs/changelog/108041.yaml b/docs/changelog/108041.yaml new file mode 100644 index 0000000000000..a82e0798dba21 --- /dev/null +++ b/docs/changelog/108041.yaml @@ -0,0 +1,7 @@ +pr: 108041 +summary: Handle parallel calls to `createWeight` when profiling is on +area: Search +type: bug +issues: + - 104131 + - 104235 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java index 65393f4185ce8..7e504a100ba56 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/dfs/DfsProfilerIT.java @@ -39,7 +39,6 @@ public class DfsProfilerIT extends ESIntegTestCase { private static final int KNN_DIM = 3; - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104235") public void testProfileDfs() throws Exception { String textField = "text_field"; String numericField = "number"; diff --git a/server/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java b/server/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java index 0e567e8f168b7..ab3c3652e5268 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java +++ b/server/src/main/java/org/elasticsearch/search/profile/AbstractInternalProfileTree.java @@ -18,25 +18,17 @@ public abstract class AbstractInternalProfileTree, E> { - protected ArrayList breakdowns; + private final ArrayList breakdowns = new ArrayList<>(10); /** Maps 
the Query to it's list of children. This is basically the dependency tree */ - protected ArrayList> tree; + private final ArrayList> tree = new ArrayList<>(10); /** A list of the original queries, keyed by index position */ - protected ArrayList elements; + private final ArrayList elements = new ArrayList<>(10); /** A list of top-level "roots". Each root can have its own tree of profiles */ - protected ArrayList roots; + private final ArrayList roots = new ArrayList<>(10); /** A temporary stack used to record where we are in the dependency tree. */ - protected Deque stack; + private final Deque stack = new ArrayDeque<>(10); private int currentToken = 0; - public AbstractInternalProfileTree() { - breakdowns = new ArrayList<>(10); - stack = new ArrayDeque<>(10); - tree = new ArrayList<>(10); - elements = new ArrayList<>(10); - roots = new ArrayList<>(10); - } - /** * Returns a {@link QueryProfileBreakdown} for a scoring query. Scoring queries (e.g. those * that are past the rewrite phase and are now being wrapped by createWeight() ) follow @@ -48,7 +40,7 @@ public AbstractInternalProfileTree() { * @param query The scoring query we wish to profile * @return A ProfileBreakdown for this query */ - public PB getProfileBreakdown(E query) { + public final synchronized PB getProfileBreakdown(E query) { int token = currentToken; boolean stackEmpty = stack.isEmpty(); @@ -109,7 +101,7 @@ private PB addDependencyNode(E element, int token) { /** * Removes the last (e.g. 
most recent) value on the stack */ - public void pollLast() { + public final synchronized void pollLast() { stack.pollLast(); } @@ -120,7 +112,7 @@ public void pollLast() { * * @return a hierarchical representation of the profiled query tree */ - public List getTree() { + public final synchronized List getTree() { ArrayList results = new ArrayList<>(roots.size()); for (Integer root : roots) { results.add(doGetTree(root)); diff --git a/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java b/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java index 28fc36e09a50d..45d12be00ac11 100644 --- a/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java +++ b/server/src/main/java/org/elasticsearch/search/profile/AbstractProfileBreakdown.java @@ -44,7 +44,7 @@ public AbstractProfileBreakdown(Class clazz) { * @param timingType the timing type to create a new {@link Timer} for * @return a new {@link Timer} instance */ - public Timer getNewTimer(T timingType) { + public final Timer getNewTimer(T timingType) { Timer timer = new Timer(); timings.get(timingType).add(timer); return timer; From f4fac1e545a26b6fe5e6b2ca23ae6804d5c4cc3e Mon Sep 17 00:00:00 2001 From: Moritz Mack Date: Tue, 30 Apr 2024 10:53:53 +0200 Subject: [PATCH 041/244] Update README for YAML Rest API specs (#107837) --- .../rest-api-spec/test/README.asciidoc | 223 ++++++++++++------ 1 file changed, 157 insertions(+), 66 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc index c2baa6746afdb..0fcedece97f01 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc @@ -87,7 +87,108 @@ in the `indices.get_settings` API. 
== Skipping tests: -=== Skip for Elasticsearch versions +If a test section should only be run for certain releases of Elasticsearch, +then the first entry in the section (after the title) should be called +`requires` and / or `skip` depending on the use case. + +A `requires` section defines requirements that have to be met in order for tests to run, such as: + +- `cluster_features` to <>. +- `test_runner_features` to <>. + +A `requires` section must specify at least one of the two options above. +If `cluster_features` are required, a `reason` must be provided in addition. + +A `skip` section, on the other hand, defines certain conditions that, if met, will skip the test, such as: + +- `cluster_features` to <>. +- `known_issues` to <>. +- `awaits_fix` to <>. +- `os` to <>. +- `features`: Only kept for a transition period, please use <> + in the `requires` section instead. + +A `skip` section must specify at least one of the options above. +Unless only legacy test runner `features` are required, a `reason` must also be provided. + +`requires` and / or `skip` can also be used at the top level of the file in the `setup` and `teardown` blocks, +so all the tests in a file will be skipped if either any requirement fails or any skip condition applies regardless +if defined in `setup` and `teardown`. + +[[cluster_features]] +=== Require or skip cluster features + +In the past, Elasticsearch has been over-reliant on release version checks all across its code base. +This has become a limiting factor when deploying Elasticsearch in a more continuous fashion. + +Moving forward, the goal is to base any such check on the availability of a cluster feature. +Rather than <>, you can +require `cluster_features` to be either present (`requires`) and / or absent (`skip`) in order to run a test. +For instance: + +.... 
+ "Parent": + - requires: + cluster_features: feature_x + reason: Feature X was introduced + - skip: + cluster_features: feature_x_changed + reason: Change to feature X breaks this test + + - do: + ... test definitions ... +.... + +The `cluster_features` field can either be a string or an array of strings. + +[[synthetic_cluster_features]] +Note: In order to smoothen the transition from version checks to cluster feature checks, a REST-test specific +synthetic cluster feature named `gte_v{VERSION}` is available for all release versions until including 8.14.0. +For instance, `gte_v8.12.2` would be available for all release versions greater than or equal to 8.12.2. + +[[skip_known_issues]] +=== Skip on known issues + +Previously, it was possible to skip ranges of broken release versions using <>. +`known_issues` provides a more explicit way to express and skip a certain range of buggy releases based on cluster features. +Each of possibly multiple issues is a pair of `cluster_feature` and `fixed_by`, where an issue was +introduced by the former feature and eventually fixed by the latter one. For instance: + +.... + "Parent": + - skip: + known_issues: + - cluster_feature: feature_y + fixed_by: feature_y_fix + - cluster_feature: feature_z + fixed_by: feature_z_fix + reason: Skipped for buggy feature_y until fixed by feature_y_fix and feature_z until fixed by feature_z_fix + + - do: + ... test definitions ... +.... + +Note: If a known issue cannot be defined in terms of existing cluster features, the previously described +<> can be used. + +[[skip_awaits_fix]] +=== Skip while awaiting fix + +In certain cases there's no fix available yet. In order to mute a test, use `awaits_fix` with the corresponding ticket / issue. + +For instance: +.... + "Parent": + - skip: + awaits_fix: https://github.com/elastic/elasticsearch/issues/xyz + reason: Muted due to #xyz + + - do: + ... test definitions ... +.... 
+ +[[skip_version]] +=== Skip for Elasticsearch versions (deprecated) If a test section should only be run on certain versions of Elasticsearch, then the first entry in the section (after the title) should be called @@ -135,48 +236,11 @@ The value for version can also be `all`, to skip in any version of Elasticsearch. This can be used for example when a feature is being implemented or awaiting a fix. -`skip` can also be used at the top level of the file in the `setup` and `teardown` blocks, -so all the tests in a file will be skipped if the condition applies. -A particular test is skipped if any of the skip conditions for the test, -the setup or the teardown apply. -This can have a similar effect to the multi-range support described above -in that we can specify tests that only run within a specific range. -For example, if a new feature was introduced in 8.1.0, we could create a test file -with the `setup` block containing a `skip.version` of `" - 8.0.99"`, causing all tests -to be skipped for earlier versions. Then specific tests that are added later could -add to this by either: - -* increasing the upper bound for positive tests (test new enhancement works): -`skip.version: " - 8.6.99"` -* or creating an additional lower bound for negative tests -(test that exception is thrown for older versions, as in multi-range example above): -`skip.version: "8.8.0 - "` - -=== Skip on missing runner features - -The skip section can also be used to list new features that need to be -supported in order to run a test. This way the up-to-date runners will -run the test, while the ones that don't support the feature yet can -temporarily skip it, and avoid having lots of test failures in the meantime. -Once all runners have implemented the feature, it can be declared supported -by default, thus the related skip sections can be removed from the tests. 
- -The skip section can also be used to selectively mute tests in certain -cases where they would otherwise fail, see `default_shards` and `fips_140`. +[[skip_os]] +=== Skip on certain operating systems -.... - "Parent": - - skip: - features: regex - - - do: - ... test definitions ... -.... - -The `features` field can either be a string or an array of strings. - -The skip section can also be used to mute tests for certain operating systems. -This way it is not necessary to mute the whole test if a operating system +The `skip` section can also be used to mute tests for certain operating systems. +This way it is not necessary to mute the whole test if an operating system specific problem appears. The operating system is taken from the pretty name that elasticsearch reports @@ -185,13 +249,14 @@ for: `initializing client, minimum es version` -When muting by operating system, a reason is mandatory and features must contain -skip_os: +When muting by operating system, a `reason` is mandatory and `skip_os` must be defined as requirement in +`test_runner_features` (see below). .... "Parent": + - requires: + test_runner_features: skip_os - skip: - features: skip_os os: debian-8 reason: memory accounting problems on debian 8, see gh#xyz @@ -201,37 +266,63 @@ skip_os: The `os` field can either be a string or an array of strings. -The skip section requires to specify either a `version`, `features` or `os` list. +[[requires_test_runner_features]] +=== Require specific test runner features + +The `requires` section can also be used to list test runner features that need to be +supported by the runner in order to execute a test. This way the up-to-date runners will +run the test, while the ones that don't support the feature yet can +temporarily skip it, and avoid having lots of test failures in the meantime. +Once all runners have implemented the feature, it can be declared supported +by default, thus the related `requires` sections can be removed from the tests. 
+ +The `requires` section can also be used to selectively mute tests in certain +cases where they would otherwise fail, see `default_shards` and `fips_140`. + +.... + "Parent": + - requires: + test_runner_features: regex + + - do: + ... test definitions ... +.... + +The `test_runner_features` field can either be a string or an array of strings. + +Note: +Tests that are still using `features` in the `skip` sections should be migrated to +`test_runner_features` to avoid confusion with recently added cluster features. -=== Available Features +==== Available test runner features -==== `xpack` +===== `xpack` Requires x-pack to be enabled on the `Elasticsearch` instance the rest test is running against -==== `no_xpack` +===== `no_xpack` Requires the test to run against an oss distribution of `Elasticsearch` -==== `catch_unauthorized` +===== `catch_unauthorized` Runner supports `catch: unauthorized` on a `do` operator. -==== `default_shards` +===== `default_shards` This test can only run if the cluster is running with the distributions default number of shards. The Java test runner introduces randomness and sometimes overrides the default number of shards to `2`. If the default number of shards is changed, test marked with this feature should *not* run -==== `headers` +===== `headers` The runner is able to set per request headers on the `do` operation -==== `node_selector` +===== `node_selector` Indicates the runner can parse `node_selector` under the `do` operator and use its metadata to select the node to perform the `do` operation on. -==== `stash_in_key` +===== `stash_in_key` Allows you to use a stashed value in any key of an object during a `match` assertion @@ -248,7 +339,7 @@ Allows you to use a stashed value in any key of an object during a `match` asser } .... -==== `stash_in_path` +===== `stash_in_path` Allows a stashed value to be referenced in path lookups as a single token. 
E.g: @@ -256,7 +347,7 @@ Allows a stashed value to be referenced in path lookups as a single token. E.g: path.$stash.value .... -==== `embedded_stash_key` +===== `embedded_stash_key` Allows a stashed key to appear anywhere in the path (note the placeholder needs to be within curly brackets too in this case): @@ -264,7 +355,7 @@ Allows a stashed key to appear anywhere in the path (note the placeholder needs field1.e${placeholder}ments.element1 .... -==== `stash_path_replace` +===== `stash_path_replace` Used only in the doc snippet tests. Allow you to do ease replacements using a special `$_path` marker. .... @@ -272,30 +363,30 @@ Used only in the doc snippet tests. Allow you to do ease replacements using a sp somevalue with whatever is the response in the same position." .... -==== `warnings` +===== `warnings` The runner can assert specific warnings headers are returned by Elasticsearch through the `warning:` assertations under `do:` operations. The test will fail if the warning is not found. -==== `warnings_regex` +===== `warnings_regex` The same as `warnings`, but matches warning headers with the given regular expression. -==== `allowed_warnings` +===== `allowed_warnings` The runner will allow specific warnings headers to be returned by Elasticsearch through the `allowed_warning:` assertations under `do:` operations. The test will not fail if the warning is not found. -==== `allowed_warnings_regex` +===== `allowed_warnings_regex` The same as `allowed_warnings`, but matches warning headers with the given regular expression. -==== `yaml` +===== `yaml` The runner is able to send and receive `application/yaml` and perform all assertions on the returned data. -==== `contains` +===== `contains` Asserts an array of object contains an object with a property set to a certain value. e.g: @@ -310,11 +401,11 @@ Alternatively, this can be used to assert that a string response contains a cert ... 
contains: { items.0.index.error.reason: "must be mapped" } -==== `transform_and_set` +===== `transform_and_set` Supports the `transform_and_set` operator as described in this document. -==== `arbitrary_key` +===== `arbitrary_key` Allows you to stash an arbitrary key from a returned map e.g: @@ -325,7 +416,7 @@ Allows you to stash an arbitrary key from a returned map e.g: This means: Stash any of the keys returned under `nodes` as `$node_id` -==== `fips_140` +===== `fips_140` This test should not be run when the test cluster is set in FIPS 140 mode. From e784706b4e9ddc5ba58ac45d031c2be8dc0a6499 Mon Sep 17 00:00:00 2001 From: Ioana Tagirta Date: Tue, 30 Apr 2024 11:31:45 +0200 Subject: [PATCH 042/244] Allow typed_keys for search application Search API (#108007) * Allow typed_keys for search application Search API * Update docs/changelog/108007.yaml * Use RestSearchAction.TYPED_KEYS_PARAM --- docs/changelog/108007.yaml | 5 ++ .../apis/search-application-search.asciidoc | 5 ++ .../api/search_application.search.json | 7 ++ x-pack/plugin/ent-search/qa/rest/roles.yml | 1 + .../search/55_search_application_search.yml | 84 +++++++++++++++++++ .../RestQuerySearchApplicationAction.java | 8 ++ 6 files changed, 110 insertions(+) create mode 100644 docs/changelog/108007.yaml diff --git a/docs/changelog/108007.yaml b/docs/changelog/108007.yaml new file mode 100644 index 0000000000000..5d24f8c87597c --- /dev/null +++ b/docs/changelog/108007.yaml @@ -0,0 +1,5 @@ +pr: 108007 +summary: Allow `typed_keys` for search application Search API +area: Application +type: feature +issues: [] diff --git a/docs/reference/search-application/apis/search-application-search.asciidoc b/docs/reference/search-application/apis/search-application-search.asciidoc index b166c8aae04d0..2d13ed5f11037 100644 --- a/docs/reference/search-application/apis/search-application-search.asciidoc +++ b/docs/reference/search-application/apis/search-application-search.asciidoc @@ -23,6 +23,11 @@ Unspecified template 
parameters will be assigned their default values (if applic Requires read privileges on the backing alias of the search application. [[search-application-search-path-params]] +==== {api-path-parms-title} + +`typed_keys`:: +(Optional, Boolean) If `true`, aggregation and suggester names are prefixed +by their respective types in the response. Defaults to `false`. [[search-application-search-request-body]] ==== {api-request-body-title} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_application.search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_application.search.json index 93f00212d0592..d0a8d36d9b46d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_application.search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_application.search.json @@ -34,6 +34,13 @@ "body": { "description": "Search parameters, including template parameters that override defaults", "required": false + }, + "params": { + "typed_keys":{ + "type":"boolean", + "default":false, + "description": "Specify whether aggregation and suggester names should be prefixed by their respective types in the response" + } } } } diff --git a/x-pack/plugin/ent-search/qa/rest/roles.yml b/x-pack/plugin/ent-search/qa/rest/roles.yml index 89ab91b2694d6..d32f05b7b749e 100644 --- a/x-pack/plugin/ent-search/qa/rest/roles.yml +++ b/x-pack/plugin/ent-search/qa/rest/roles.yml @@ -25,6 +25,7 @@ user: "test-index1", "test-search-application", "test-search-application-1", + "test-search-application-with-aggs", "test-search-application-with-list", "test-search-application-with-list-invalid", ".elastic-connectors-v1", diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/search/55_search_application_search.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/search/55_search_application_search.yml index 42a356038ae68..cda7cb431c2da 100644 --- 
a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/search/55_search_application_search.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/search/55_search_application_search.yml @@ -111,6 +111,34 @@ setup: boost: 3 lang: "mustache" + - do: + search_application.put: + name: test-search-application-with-aggs + body: + indices: [ "test-search-index1", "test-search-index2" ] + analytics_collection_name: "test-analytics" + template: + script: + source: + query: + term: + "{{field_name}}": "{{field_value}}" + aggs: + my_agg: + value_count: + field: "field1.keyword" + params: + field_name: field1 + field_value: value1 + dictionary: + additionalProperties: false + required: [ "field_name" ] + properties: + field_name: + type: string + field_value: + type: string + - do: index: index: test-search-index1 @@ -151,6 +179,11 @@ teardown: name: test-search-application-with-list-invalid ignore: 404 + - do: + search_application.delete: + name: test-search-application-with-aggs + ignore: 404 + - do: indices.delete: index: test-search-index1 @@ -318,3 +351,54 @@ teardown: - name: field3 boost: 3 +--- +"Search Application search with typed keys includes type prefix in aggregation names": + - skip: + features: headers + + - do: + headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user + search_application.search: + name: test-search-application-with-aggs + typed_keys: true + body: + params: + field_name: field2 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc1" } + - match: { aggregations.value_count#my_agg.value: 1 } + +--- +"Search Application search with typed keys set to false returns aggregations without type prefix": + - skip: + features: headers + + - do: + headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user + search_application.search: + name: test-search-application-with-aggs + body: 
+ params: + field_name: field2 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc1" } + - match: { aggregations.my_agg.value: 1 } + +--- +"Search Application search without typed keys returns aggregations without type prefix": + - skip: + features: headers + + - do: + headers: { Authorization: "Basic ZW50c2VhcmNoLXVzZXI6ZW50c2VhcmNoLXVzZXItcGFzc3dvcmQ=" } # user + search_application.search: + name: test-search-application-with-aggs + body: + params: + field_name: field2 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "doc1" } + - match: { aggregations.my_agg.value: 1 } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java index de0bb837acef8..16aa24b16c291 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java @@ -14,12 +14,14 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; +import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.EnterpriseSearchBaseRestHandler; import org.elasticsearch.xpack.application.utils.LicenseUtils; import java.io.IOException; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -31,6 +33,7 @@ public RestQuerySearchApplicationAction(XPackLicenseState licenseState) { } public static final String ENDPOINT_PATH = 
"/" + EnterpriseSearch.SEARCH_APPLICATION_API_ENDPOINT + "/{name}" + "/_search"; + public static final Set RESPONSE_PARAMS = Set.of(RestSearchAction.TYPED_KEYS_PARAM); @Override public String getName() { @@ -56,4 +59,9 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC cancelClient.execute(QuerySearchApplicationAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); }; } + + @Override + protected Set responseParams() { + return RESPONSE_PARAMS; + } } From bdcbb0416c4e70d99ce21ae5d3e97772836f3503 Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Tue, 30 Apr 2024 12:35:53 +0200 Subject: [PATCH 043/244] [Connector API] Mark connector management APIs as beta in 8.14 (#108004) --- .../cancel-connector-sync-job-api.asciidoc | 2 +- .../connector/apis/connector-apis.asciidoc | 91 +++++++++++++++---- .../apis/create-connector-api.asciidoc | 2 +- .../create-connector-sync-job-api.asciidoc | 2 +- .../apis/delete-connector-api.asciidoc | 2 +- .../delete-connector-sync-job-api.asciidoc | 2 +- .../connector/apis/get-connector-api.asciidoc | 2 +- .../apis/get-connector-sync-job-api.asciidoc | 2 +- .../list-connector-sync-jobs-api.asciidoc | 2 +- .../apis/list-connectors-api.asciidoc | 2 +- .../update-connector-api-key-id-api.asciidoc | 2 +- ...pdate-connector-configuration-api.asciidoc | 2 +- .../update-connector-filtering-api.asciidoc | 3 +- .../update-connector-index-name-api.asciidoc | 2 +- ...te-connector-name-description-api.asciidoc | 2 +- .../update-connector-pipeline-api.asciidoc | 2 +- .../update-connector-scheduling-api.asciidoc | 2 +- ...update-connector-service-type-api.asciidoc | 2 +- 18 files changed, 89 insertions(+), 37 deletions(-) diff --git a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc index ac22f2c4adf64..0519cfdf15984 100644 --- a/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc +++ 
b/docs/reference/connector/apis/cancel-connector-sync-job-api.asciidoc @@ -4,7 +4,7 @@ Cancel connector sync job ++++ -preview::[] +beta::[] Cancels a connector sync job. diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index b5f3d1a1aa87b..c8b06bb1d0a6e 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -1,7 +1,7 @@ [[connector-apis]] == Connector APIs -preview::[] +beta::[] The connector and sync jobs APIs provide a convenient way to create and manage Elastic {enterprise-search-ref}/connectors.html[connectors^] and sync jobs in an internal index. @@ -29,21 +29,29 @@ You can use these APIs to create, get, delete and update connectors. Use the following APIs to manage connectors: * <> +beta:[] * <> +beta:[] * <> +beta:[] * <> -* <> +beta:[] * <> +beta:[] * <> -* <> +beta:[] * <> +beta:[] * <> -* <> +beta:[] * <> +beta:[] * <> +beta:[] * <> +beta:[] * <> -* <> +beta:[] [discrete] @@ -54,38 +62,81 @@ You can use these APIs to create, cancel, delete and update sync jobs. Use the following APIs to manage sync jobs: - -* <> -* <> * <> +beta:[] +* <> +beta:[] * <> +beta:[] * <> +beta:[] * <> +beta:[] + + +[discrete] +[[service-apis]] +=== Service APIs + +preview::[] + +*Connector Service APIs* are a subset of Connector API endpoints that represent framework-level operations defined in the https://github.com/elastic/connectors/blob/main/docs/CONNECTOR_PROTOCOL.md[Connector Protocol]. These APIs are not intended for direct connector management by users but are there to support the implementation of services that utilize the Connector Protocol to communicate with {es}. + +[TIP] +==== +All Elastic connectors are built using our Python connector framework. The source code is available in the https://github.com/elastic/connectors[elastic/connectors] repository on GitHub. 
+==== + +[discrete] +[[connector-service-apis]] +==== Connector Service APIs + +* <> +preview:[] +* <> +preview:[] +* <> +preview:[] +* <> +preview:[] + +[discrete] +[[sync-job-service-apis]] +==== Sync Job Service APIs + +* <> +preview:[] * <> +preview:[] * <> +preview:[] + -include::cancel-connector-sync-job-api.asciidoc[] -include::check-in-connector-api.asciidoc[] -include::check-in-connector-sync-job-api.asciidoc[] include::create-connector-api.asciidoc[] -include::create-connector-sync-job-api.asciidoc[] include::delete-connector-api.asciidoc[] -include::delete-connector-sync-job-api.asciidoc[] include::get-connector-api.asciidoc[] -include::get-connector-sync-job-api.asciidoc[] include::list-connectors-api.asciidoc[] -include::list-connector-sync-jobs-api.asciidoc[] -include::set-connector-sync-job-error-api.asciidoc[] -include::set-connector-sync-job-stats-api.asciidoc[] include::update-connector-api-key-id-api.asciidoc[] include::update-connector-configuration-api.asciidoc[] -include::update-connector-error-api.asciidoc[] -include::update-connector-filtering-api.asciidoc[] include::update-connector-index-name-api.asciidoc[] -include::update-connector-last-sync-api.asciidoc[] +include::update-connector-filtering-api.asciidoc[] include::update-connector-name-description-api.asciidoc[] include::update-connector-pipeline-api.asciidoc[] include::update-connector-scheduling-api.asciidoc[] include::update-connector-service-type-api.asciidoc[] + +include::create-connector-sync-job-api.asciidoc[] +include::cancel-connector-sync-job-api.asciidoc[] +include::delete-connector-sync-job-api.asciidoc[] +include::get-connector-sync-job-api.asciidoc[] +include::list-connector-sync-jobs-api.asciidoc[] + +include::check-in-connector-api.asciidoc[] +include::update-connector-error-api.asciidoc[] +include::update-connector-last-sync-api.asciidoc[] include::update-connector-status-api.asciidoc[] + +include::check-in-connector-sync-job-api.asciidoc[] 
+include::set-connector-sync-job-error-api.asciidoc[] +include::set-connector-sync-job-stats-api.asciidoc[] diff --git a/docs/reference/connector/apis/create-connector-api.asciidoc b/docs/reference/connector/apis/create-connector-api.asciidoc index 15dc4ed43c72d..04f91acc8640a 100644 --- a/docs/reference/connector/apis/create-connector-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-api.asciidoc @@ -4,7 +4,7 @@ Create connector ++++ -preview::[] +beta::[] Creates an Elastic connector. Connectors are {es} integrations that bring content from third-party data sources, which can be deployed on {ecloud} or hosted on your own infrastructure: diff --git a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc index c4fdd362c31c0..43a2339c56847 100644 --- a/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/create-connector-sync-job-api.asciidoc @@ -4,7 +4,7 @@ Create connector sync job ++++ -preview::[] +beta::[] Creates a connector sync job. diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index 2e7c7a3b60708..d8dfea8c401ce 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -4,7 +4,7 @@ Delete connector ++++ -preview::[] +beta::[] Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. 
diff --git a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc index 1e53c7f843afd..0c3274454845c 100644 --- a/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-sync-job-api.asciidoc @@ -4,7 +4,7 @@ Delete connector sync job ++++ -preview::[] +beta::[] Removes a connector sync job and its associated data. This is a destructive action that is not recoverable. diff --git a/docs/reference/connector/apis/get-connector-api.asciidoc b/docs/reference/connector/apis/get-connector-api.asciidoc index 3a546ab372b67..7e5186b146c51 100644 --- a/docs/reference/connector/apis/get-connector-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-api.asciidoc @@ -4,7 +4,7 @@ Get connector ++++ -preview::[] +beta::[] Retrieves the details about a connector. diff --git a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc index 0c136f8e037b0..e162fe191375c 100644 --- a/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc +++ b/docs/reference/connector/apis/get-connector-sync-job-api.asciidoc @@ -4,7 +4,7 @@ Get connector sync job ++++ -preview::[] +beta::[] Retrieves the details about a connector sync job. diff --git a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc index 303abdaa546b1..410bec7ac38ac 100644 --- a/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc +++ b/docs/reference/connector/apis/list-connector-sync-jobs-api.asciidoc @@ -5,7 +5,7 @@ List connector sync jobs ++++ -preview::[] +beta::[] Returns information about all stored connector sync jobs ordered by their creation date in ascending order. 
diff --git a/docs/reference/connector/apis/list-connectors-api.asciidoc b/docs/reference/connector/apis/list-connectors-api.asciidoc index 94578dbd493e1..c24acec5e82ce 100644 --- a/docs/reference/connector/apis/list-connectors-api.asciidoc +++ b/docs/reference/connector/apis/list-connectors-api.asciidoc @@ -5,7 +5,7 @@ List connectors ++++ -preview::[] +beta::[] Returns information about all created connectors. diff --git a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc index 9b08ceea0aacc..b2b9dd7958191 100644 --- a/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-api-key-id-api.asciidoc @@ -4,7 +4,7 @@ Update connector API key id ++++ -preview::[] +beta::[] Updates the `api_key_id` and/or `api_key_secret_id` field(s) of a connector, specifying: diff --git a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc index 256621afb8fc5..51252be4a04c3 100644 --- a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc @@ -4,7 +4,7 @@ Update connector configuration ++++ -preview::[] +beta::[] Updates a connector's `configuration`, allowing for config value updates within a registered configuration schema. 
diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc index c028eece2e168..f864b68f65395 100644 --- a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -4,7 +4,7 @@ Update connector filtering ++++ -preview::[] +beta::[] Updates the draft `filtering` configuration of a connector and marks the draft validation state as `edited`. The filtering configuration can be activated once validated by the Elastic connector service. @@ -14,6 +14,7 @@ The filtering property is used to configure sync rules (both basic and advanced) ==== {api-request-title} `PUT _connector//_filtering` + `PUT _connector//_filtering/_activate` [[update-connector-filtering-api-prereq]] diff --git a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc index 02a4c0e762b28..3f931d8ac5edd 100644 --- a/docs/reference/connector/apis/update-connector-index-name-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-index-name-api.asciidoc @@ -4,7 +4,7 @@ Update connector index name ++++ -preview::[] +beta::[] Updates the `index_name` field of a connector, specifying the index where the data ingested by the connector is stored. diff --git a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc index 7fb5deb746473..9ecf0f5d175fd 100644 --- a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc @@ -4,7 +4,7 @@ Update connector name and description ++++ -preview::[] +beta::[] Updates the `name` and `description` fields of a connector. 
diff --git a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc index 30873ca5f5577..f8f804595eb19 100644 --- a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc @@ -4,7 +4,7 @@ Update connector pipeline ++++ -preview::[] +beta::[] Updates the `pipeline` configuration of a connector. diff --git a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc index df7a18ec6ad66..6d30197dd3390 100644 --- a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc @@ -4,7 +4,7 @@ Update connector scheduling ++++ -preview::[] +beta::[] Updates the `scheduling` configuration of a connector. diff --git a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc index 9f4b1a6fc9a24..fb61a25848a9d 100644 --- a/docs/reference/connector/apis/update-connector-service-type-api.asciidoc +++ b/docs/reference/connector/apis/update-connector-service-type-api.asciidoc @@ -4,7 +4,7 @@ Update connector service type ++++ -preview::[] +beta::[] Updates the `service_type` of a connector. 
From fd6c2677953228d909ca05833e03405686298d3f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 30 Apr 2024 11:41:12 +0100 Subject: [PATCH 044/244] AwaitsFix for #108061 --- .../java/org/elasticsearch/search/ccs/CrossClusterIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index 501f46fb52b4b..e96689ce2846d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -188,6 +188,7 @@ public void testProxyConnectionDisconnect() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/108061") public void testCancel() throws Exception { assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareCreate("demo")); indexDocs(client(LOCAL_CLUSTER), "demo"); From 46d4ba9cbc0a959c63cbd3ff3fc41c9028574bf5 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 30 Apr 2024 13:59:20 +0200 Subject: [PATCH 045/244] [Profiling] Round top-level values in TopN API (#108054) Typically double values are rounded in profiling APIs. However, we have missed `self_annual_co2_tons` and `self_annual_cost_usd` in the TopN functions API. With this commit we also round these two values according to the existing convention. 
Relates elastic/kibana#182001 --- .../action/GetTopNFunctionsResponse.java | 4 +- .../action/GetTopNFunctionsResponseTests.java | 95 +++++++++++++++++++ 2 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponseTests.java diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponse.java index a42e64546058c..4ee496dcb2870 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponse.java @@ -67,8 +67,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("self_count", selfCount); builder.field("total_count", totalCount); - builder.field("self_annual_co2_tons", annualCo2Tons); - builder.field("self_annual_cost_usd", annualCostsUsd); + builder.field("self_annual_co2_tons").rawValue(NumberUtils.doubleToString(annualCo2Tons)); + builder.field("self_annual_cost_usd").rawValue(NumberUtils.doubleToString(annualCostsUsd)); builder.xContentList("topn", topNFunctions); builder.endObject(); return builder; diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponseTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponseTests.java new file mode 100644 index 0000000000000..ebb3d492b024c --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/action/GetTopNFunctionsResponseTests.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling.action; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + +public class GetTopNFunctionsResponseTests extends ESTestCase { + + public void testToXContent() throws IOException { + String fileID = "6tVKI4mSYDEJ-ABAIpYXcg"; + int frameType = 1; + boolean inline = false; + int addressOrLine = 23; + String functionName = "PyDict_GetItemWithError"; + String sourceFilename = "/build/python3.9-RNBry6/python3.9-3.9.2/Objects/dictobject.c"; + int sourceLine = 1456; + String exeFilename = "python3.9"; + + String frameGroupID = FrameGroupID.create(fileID, addressOrLine, exeFilename, sourceFilename, functionName); + + XContentType contentType = randomFrom(XContentType.values()); + + // tag::noformat + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .field("self_count", 1) + .field("total_count", 10) + .field("self_annual_co2_tons").rawValue("2.2000") + .field("self_annual_cost_usd").rawValue("12.0000") + .startArray("topn") + .startObject() + .field("id", frameGroupID) + .field("rank", 1) + .startObject("frame") + .field("frame_type", frameType) + .field("inline", inline) + .field("address_or_line", addressOrLine) + .field("function_name", functionName) + .field("file_name", sourceFilename) + .field("line_number", sourceLine) + .field("executable_file_name", exeFilename) + 
.endObject() + .field("sub_groups", Map.of("basket", 7L)) + .field("self_count", 1) + .field("total_count", 10) + .field("self_annual_co2_tons").rawValue("2.2000") + .field("total_annual_co2_tons").rawValue("22.0000") + .field("self_annual_costs_usd").rawValue("12.0000") + .field("total_annual_costs_usd").rawValue("120.0000") + .endObject() + .endArray() + .endObject(); + // end::noformat + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + TopNFunction topNFunction = new TopNFunction( + frameGroupID, + 1, + frameType, + inline, + addressOrLine, + functionName, + sourceFilename, + sourceLine, + exeFilename, + 1, + 10, + 2.2d, + 22.0d, + 12.0d, + 120.0d, + Map.of("basket", 7L) + ); + GetTopNFunctionsResponse response = new GetTopNFunctionsResponse(1, 10, 2.2d, 12.0d, List.of(topNFunction)); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } +} From 67748cf616a17793ae2e559cc2bd90841295841c Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 30 Apr 2024 08:25:37 -0400 Subject: [PATCH 046/244] Adding docs about scaled_float saturation with long values (#107966) --- docs/reference/mapping/types/numeric.asciidoc | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index 32f4964e8ca43..1e87faea5b13a 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -227,6 +227,18 @@ numeric field can't be both a time series dimension and a time series metric. of `scaling_factor` improve accuracy but also increase space requirements. This parameter is required. +[[scaled-float-saturation]] +==== `scaled_float` saturation + +`scaled_float` is stored as a single `long` value, which is the product of multiplying the original value by the scaling factor. 
If the multiplication +results in a value that is outside the range of a `long`, the value is saturated +to the minimum or maximum value of a `long`. For example, if the scaling factor +is +100+ and the value is +92233720368547758.08+, the expected value is +9223372036854775808+. +However, the value that is stored is +9223372036854775807+, the maximum value for a `long`. + +This can lead to unexpected results with <> +when the scaling factor or provided `float` value are exceptionally large. + [[numeric-synthetic-source]] ==== Synthetic `_source` From 03651dc8c302e01a3152a82fdc51b7e01480b4be Mon Sep 17 00:00:00 2001 From: Pat Whelan Date: Tue, 30 Apr 2024 08:28:07 -0400 Subject: [PATCH 047/244] [Transform] Exit gracefully when deleted (#107917) Check if the Transform was aborted before failing due to missing Transform config. If the `DELETE _transform/id` API is called while the Indexer is looking up the Config, it is possible the delete API will remove the Config before the Indexer can retrieve the Config. Rather than fail the Transform, the indexer will check if the delete API has been called via the `ABORTING` state and move into its graceful shutdown sequence. 
Co-authored-by: Elastic Machine --- docs/changelog/107917.yaml | 6 ++ .../transforms/TransformIndexer.java | 16 +++- .../TransformIndexerStateTests.java | 96 +++++++++++++++++-- 3 files changed, 105 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/107917.yaml diff --git a/docs/changelog/107917.yaml b/docs/changelog/107917.yaml new file mode 100644 index 0000000000000..18125bf46f4b7 --- /dev/null +++ b/docs/changelog/107917.yaml @@ -0,0 +1,6 @@ +pr: 107917 +summary: Exit gracefully when deleted +area: Transform +type: bug +issues: + - 107266 diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java index 36d10653aae63..a55260e789aaa 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformIndexer.java @@ -390,11 +390,19 @@ protected void onStart(long now, ActionListener listener) { } }, failure -> { String msg = TransformMessages.getMessage(TransformMessages.FAILED_TO_RELOAD_TRANSFORM_CONFIGURATION, getJobId()); - // If the transform config index or the transform config is gone, something serious occurred - // We are in an unknown state and should fail out + // If the transform config index or the transform config is gone, then it is possible the transform was deleted. + // If the transform was deleted, it will be in the Aborting state, and we can safely return out. If it is not in the + // Aborting state, then something serious has occurred, and we should fail out. 
if (failure instanceof ResourceNotFoundException) { - logger.error(msg, failure); - reLoadFieldMappingsListener.onFailure(new TransformConfigLostOnReloadException(msg, failure)); + if (IndexerState.ABORTING == getState()) { + logger.atDebug() + .withThrowable(failure) + .log("Transform is in state [{}] during possible failure [{}].", IndexerState.ABORTING.value(), msg); + listener.onResponse(false); + } else { + logger.error(msg, failure); + reLoadFieldMappingsListener.onFailure(new TransformConfigLostOnReloadException(msg, failure)); + } } else { logger.warn(msg, failure); auditor.warning(getJobId(), msg); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index b9c4067da6b91..a474976cf9dfa 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; @@ -76,7 +77,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static 
org.mockito.Mockito.verify; public class TransformIndexerStateTests extends ESTestCase { @@ -114,6 +121,7 @@ class MockedTransformIndexer extends TransformIndexer { private CountDownLatch startLatch; private CountDownLatch searchLatch; private CountDownLatch doProcessLatch; + private CountDownLatch finishLatch = new CountDownLatch(1); MockedTransformIndexer( ThreadPool threadPool, @@ -276,6 +284,26 @@ void validate(ActionListener listener) { listener.onResponse(null); } + @Override + protected void onFinish(ActionListener listener) { + try { + super.onFinish(listener); + } finally { + finishLatch.countDown(); + } + } + + public void waitUntilFinished() throws InterruptedException { + assertTrue( + Strings.format( + "Timed out waiting for the Indexer to complete onFinish(). Indexer state and stats: [{}] [{}]", + getState().value(), + getStats() + ), + finishLatch.await(5, TimeUnit.SECONDS) + ); + } + void finishCheckpoint() { searchResponse = null; } @@ -704,11 +732,9 @@ public void testStopBeforeIndexingThreadStarts() throws Exception { // now let the indexer thread run startLatch.countDown(); - - assertBusy(() -> { - assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); - assertThat(indexer.getLastCheckpoint().getCheckpoint(), equalTo(-1L)); - }); + indexer.waitUntilFinished(); + assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); + assertThat(indexer.getLastCheckpoint().getCheckpoint(), equalTo(-1L)); } /** @@ -741,11 +767,10 @@ public void testForceStopBeforeIndexingThreadStarts() throws Exception { // now let the indexer thread run startLatch.countDown(); + indexer.waitUntilFinished(); - assertBusy(() -> { - assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); - assertThat(indexer.getLastCheckpoint().getCheckpoint(), equalTo(-1L)); - }); + assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); + assertThat(indexer.getLastCheckpoint().getCheckpoint(), equalTo(-1L)); } /** @@ -798,6 +823,59 @@ public void 
testStopWaitForCheckpointBeforeIndexingThreadStarts() throws Excepti }); } + /** + * Given the indexer thread is reloading the transform's Config + * When a user calls DELETE _transform/id + * Then the indexer thread should exit early without failing the transform + */ + public void testDeleteTransformBeforeConfigReload() throws Exception { + var contextListener = mock(TransformContext.Listener.class); + var context = new TransformContext(TransformTaskState.STARTED, "", 0, contextListener); + var config = createTransformConfig(); + + var configManager = spy(transformConfigManager); + + var indexer = new MockedTransformIndexer( + threadPool, + new TransformServices( + configManager, + mock(TransformCheckpointService.class), + auditor, + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO), + mock(TransformNode.class) + ), + new MockTimebasedCheckpointProvider(config), + config, + new AtomicReference<>(IndexerState.STARTED), + null, + new TransformIndexerStats(), + context + ); + + indexer.initialize(); + + // stop the indexer thread once it kicks off + var startLatch = indexer.createAwaitForStartLatch(1); + assertEquals(IndexerState.STARTED, indexer.start()); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertEquals(IndexerState.INDEXING, indexer.getState()); + + // delete the transform, equivalent to DELETE _transform/id + doAnswer(ans -> { + indexer.abort(); + return ans.callRealMethod(); + }).when(configManager).getTransformConfiguration(eq(config.getId()), any()); + + // now let the indexer thread run + startLatch.countDown(); + indexer.waitUntilFinished(); + + assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); + assertThat(indexer.getLastCheckpoint().getCheckpoint(), equalTo(-1L)); + verify(contextListener, never()).fail(any(), any(), any()); + verify(contextListener).shutdown(); + } + @TestIssueLogging( value = "org.elasticsearch.xpack.transform.transforms:DEBUG", issueUrl = 
"https://github.com/elastic/elasticsearch/issues/92069" From 768a001543ce4ef102f9a4b34e1da395b99bd27a Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Tue, 30 Apr 2024 15:42:02 +0300 Subject: [PATCH 048/244] Cluster-state based Security role mapper (#107410) This implements a new UserRoleMapper that sources the role mapping rules from the cluster state. The role mapping rules are stored under a new custom cluster state that is persisted (both disk and snapshots). The role mapper refreshes realm caches when role mapping changes are published. The role mapper is disabled by default, and it can only be enabled from code, by other plugins. When enabled, the cluster state role mappings rules, if any, are additive to the rules from the index-based native role mapping store and the file-based DN one. --- docs/changelog/107410.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../xpack/core/XPackClientPlugin.java | 3 + .../elasticsearch/xpack/core/XPackPlugin.java | 2 + .../support/mapper/ExpressionRoleMapping.java | 31 ++ .../mapper/expressiondsl/AllExpression.java | 3 +- .../mapper/expressiondsl/AnyExpression.java | 3 +- .../security/authz/RoleMappingMetadata.java | 129 +++++ .../authc/jwt/JwtRealmSingleNodeTests.java | 43 +- .../authc/jwt/JwtRoleMappingsIntegTests.java | 483 ++++++++++++++++++ .../DisableNativeRoleMappingsStoreTests.java | 37 +- .../xpack/security/Security.java | 14 +- .../xpack/security/authc/InternalRealms.java | 18 +- .../authc/kerberos/KerberosRealm.java | 11 +- .../xpack/security/authc/ldap/LdapRealm.java | 6 +- .../xpack/security/authc/pki/PkiRealm.java | 6 +- .../mapper/ClusterStateRoleMapper.java | 91 ++++ .../support/mapper/CompositeRoleMapper.java | 16 +- .../mapper/NativeRoleMappingStore.java | 15 +- .../test/SecuritySettingsSource.java | 31 +- .../security/authc/InternalRealmsTests.java | 129 +++++ .../authc/RoleMappingMetadataTests.java | 86 ++++ .../mapper/ClusterStateRoleMapperTests.java | 166 ++++++ 
.../mapper/CompositeRoleMapperTests.java | 97 ++++ .../mapper/ExpressionRoleMappingTests.java | 2 +- 25 files changed, 1328 insertions(+), 100 deletions(-) create mode 100644 docs/changelog/107410.yaml create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java create mode 100644 x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRoleMappingsIntegTests.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RoleMappingMetadataTests.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapperTests.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapperTests.java diff --git a/docs/changelog/107410.yaml b/docs/changelog/107410.yaml new file mode 100644 index 0000000000000..5026e88cfa762 --- /dev/null +++ b/docs/changelog/107410.yaml @@ -0,0 +1,5 @@ +pr: 107410 +summary: Cluster-state based Security role mapper +area: Authorization +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 6a53829099223..a53cbb4a26c79 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -185,6 +185,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_MV_ORDERING_SORTED_ASCENDING = def(8_644_00_0); public static final TransportVersion ESQL_PAGE_MAPPING_TO_ITERATOR = def(8_645_00_0); public static final TransportVersion BINARY_PIT_ID = def(8_646_00_0); + public static final TransportVersion 
SECURITY_ROLE_MAPPINGS_IN_CLUSTER_STATE = def(8_647_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index df19648307a0b..9435dd56d4095 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -81,6 +81,7 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExceptExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges; import org.elasticsearch.xpack.core.slm.SLMFeatureSetUsage; @@ -154,6 +155,8 @@ public List getNamedWriteables() { ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom ), // security : role-mappings + new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), + new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AllExpression.NAME, AllExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AnyExpression.NAME, AnyExpression::new), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, FieldExpression.NAME, FieldExpression::new), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 1826146a5c7c0..f79a3fbf124b1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -106,6 +106,7 @@ import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; import org.elasticsearch.xpack.core.security.authc.TokenMetadata; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.termsenum.action.TermsEnumAction; @@ -297,6 +298,7 @@ private static boolean alreadyContainsXPackCustomMetadata(ClusterState clusterSt return metadata.custom(LicensesMetadata.TYPE) != null || metadata.custom(MlMetadata.TYPE) != null || metadata.custom(WatcherMetadata.TYPE) != null + || RoleMappingMetadata.getFromClusterState(clusterState).isEmpty() == false || clusterState.custom(TokenMetadata.TYPE) != null || metadata.custom(TransformMetadata.TYPE) != null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index f0976a058738a..461619f2279f6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.security.authc.support.mapper; +import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import 
org.elasticsearch.common.Strings; @@ -23,6 +24,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; @@ -30,6 +32,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -39,6 +42,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.common.Strings.format; + /** * A representation of a single role-mapping for use in NativeRoleMappingStore. * Logically, this represents a set of roles that should be applied to any user where a boolean @@ -69,6 +74,32 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable { PARSER.declareString(ignored, new ParseField(UPGRADE_API_TYPE_FIELD)); } + /** + * Given the user information (in the form of {@link UserRoleMapper.UserData}) and a collection of {@link ExpressionRoleMapping}s, + * this returns the set of role names that should be mapped to the user, according to the provided role mapping rules. 
+ */ + public static Set resolveRoles( + UserRoleMapper.UserData user, + Collection mappings, + ScriptService scriptService, + Logger logger + ) { + ExpressionModel model = user.asModel(); + Set roles = mappings.stream() + .filter(ExpressionRoleMapping::isEnabled) + .filter(m -> m.getExpression().match(model)) + .flatMap(m -> { + Set roleNames = m.getRoleNames(scriptService, model); + logger.trace( + () -> format("Applying role-mapping [{}] to user-model [{}] produced role-names [{}]", m.getName(), model, roleNames) + ); + return roleNames.stream(); + }) + .collect(Collectors.toSet()); + logger.debug(() -> format("Mapping user [{}] to roles [{}]", user, roles)); + return roles; + } + private final String name; private final RoleMapperExpression expression; private final List roles; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java index 001b0a8472d9a..dbf79a69880e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AllExpression.java @@ -25,7 +25,8 @@ public final class AllExpression implements RoleMapperExpression { private final List elements; - AllExpression(List elements) { + // public to be used in tests + public AllExpression(List elements) { assert elements != null; this.elements = elements; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java index 97880fc53c12a..ba49e4596f8a4 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/AnyExpression.java @@ -25,7 +25,8 @@ public final class AnyExpression implements RoleMapperExpression { private final List elements; - AnyExpression(List elements) { + // public to be used in tests + public AnyExpression(List elements) { assert elements != null; this.elements = elements; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java new file mode 100644 index 0000000000000..cc523ed55c095 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.authz; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.Metadata.ALL_CONTEXTS; + +public final class RoleMappingMetadata extends AbstractNamedDiffable implements Metadata.Custom { + + public static final String TYPE = "role_mappings"; + + private static final RoleMappingMetadata EMPTY = new RoleMappingMetadata(Set.of()); + + public static RoleMappingMetadata getFromClusterState(ClusterState clusterState) { + return clusterState.metadata().custom(RoleMappingMetadata.TYPE, RoleMappingMetadata.EMPTY); + } + + private final Set roleMappings; + + public RoleMappingMetadata(Set roleMappings) { + this.roleMappings = roleMappings; + } + + public RoleMappingMetadata(StreamInput input) throws IOException { + this.roleMappings = input.readCollectionAsSet(ExpressionRoleMapping::new); + } + + public Set getRoleMappings() { + return this.roleMappings; + } + + public boolean isEmpty() { + return roleMappings.isEmpty(); + } + + public ClusterState updateClusterState(ClusterState clusterState) { + if (isEmpty()) { + // prefer no role mapping custom metadata to the empty role mapping metadata + return clusterState.copyAndUpdateMetadata(b -> 
b.removeCustom(RoleMappingMetadata.TYPE)); + } else { + return clusterState.copyAndUpdateMetadata(b -> b.putCustom(RoleMappingMetadata.TYPE, this)); + } + } + + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(Metadata.Custom.class, TYPE, streamInput); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat(ChunkedToXContentHelper.startArray(TYPE), roleMappings.iterator(), ChunkedToXContentHelper.endArray()); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.SECURITY_ROLE_MAPPINGS_IN_CLUSTER_STATE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(roleMappings); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final var other = (RoleMappingMetadata) o; + return Objects.equals(roleMappings, other.roleMappings); + } + + @Override + public int hashCode() { + return Objects.hash(roleMappings); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("RoleMapping[entries=["); + final Iterator entryList = roleMappings.iterator(); + boolean firstEntry = true; + while (entryList.hasNext()) { + if (firstEntry == false) { + builder.append(","); + } + builder.append(entryList.next().toString()); + firstEntry = false; + } + return builder.append("]]").toString(); + } + + @Override + public EnumSet context() { + // It is safest to have this persisted to gateway and snapshots, although maybe redundant. + // The persistence can become an issue in cases where {@link ReservedStateMetadata} + // (which records the names of the role mappings last applied) is persisted, + // but the role mappings themselves (stored here by the {@link RoleMappingMetadata}) + // are not persisted. 
+ return ALL_CONTEXTS; + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index fba4df3c38031..2ced54a513146 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -169,6 +169,7 @@ protected String configRoles() { """; } + @Override protected boolean addMockHttpTransport() { return false; } @@ -486,7 +487,9 @@ public void testClientSecretRotation() throws Exception { .expirationTime(Date.from(Instant.now().plusSeconds(600))); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)) + .getStatusLine() + .getStatusCode() ); // valid jwt for realm1 JWTClaimsSet.Builder jwt1Claims = new JWTClaimsSet.Builder(); @@ -499,7 +502,9 @@ public void testClientSecretRotation() throws Exception { .expirationTime(Date.from(Instant.now().plusSeconds(300))); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)) + .getStatusLine() + .getStatusCode() ); // valid jwt for realm2 JWTClaimsSet.Builder jwt2Claims = new JWTClaimsSet.Builder(); @@ -512,7 +517,9 @@ public void testClientSecretRotation() throws Exception { .expirationTime(Date.from(Instant.now().plusSeconds(300))); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), 
jwt2SharedSecret)).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)) + .getStatusLine() + .getStatusCode() ); final PluginsService plugins = getInstanceFromNode(PluginsService.class); final LocalStateSecurity localStateSecurity = plugins.filterPlugins(LocalStateSecurity.class).findFirst().get(); @@ -541,30 +548,42 @@ public void testClientSecretRotation() throws Exception { // ensure the old value still works for realm 0 (default grace period) assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)) + .getStatusLine() + .getStatusCode() ); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), "realm0updatedSecret")).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt0Claims.build()), "realm0updatedSecret")) + .getStatusLine() + .getStatusCode() ); // ensure the old value still works for realm 1 (explicit grace period) assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)) + .getStatusLine() + .getStatusCode() ); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), "realm1updatedSecret")).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt1Claims.build()), "realm1updatedSecret")) + .getStatusLine() + .getStatusCode() ); // ensure the old value does not work for realm 2 (no grace period) ResponseException exception = expectThrows( ResponseException.class, - () -> client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), 
jwt2SharedSecret)).getStatusLine().getStatusCode() + () -> client.performRequest(getAuthenticateRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)) + .getStatusLine() + .getStatusCode() ); assertEquals(401, exception.getResponse().getStatusLine().getStatusCode()); assertEquals( 200, - client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), "realm2updatedSecret")).getStatusLine().getStatusCode() + client.performRequest(getAuthenticateRequest(getSignedJWT(jwt2Claims.build()), "realm2updatedSecret")) + .getStatusLine() + .getStatusCode() ); } finally { // update them back to their original values @@ -688,7 +707,7 @@ public void testValidationDuringReloadingClientSecrets() { } } - private SignedJWT getSignedJWT(JWTClaimsSet claimsSet, byte[] hmacKeyBytes) throws Exception { + static SignedJWT getSignedJWT(JWTClaimsSet claimsSet, byte[] hmacKeyBytes) throws Exception { JWSHeader jwtHeader = new JWSHeader.Builder(JWSAlgorithm.HS256).build(); OctetSequenceKey.Builder jwt0signer = new OctetSequenceKey.Builder(hmacKeyBytes); jwt0signer.algorithm(JWSAlgorithm.HS256); @@ -701,7 +720,7 @@ private SignedJWT getSignedJWT(JWTClaimsSet claimsSet) throws Exception { return getSignedJWT(claimsSet, jwtHmacKey.getBytes(StandardCharsets.UTF_8)); } - private Request getRequest(SignedJWT jwt, String sharedSecret) { + static Request getAuthenticateRequest(SignedJWT jwt, String sharedSecret) { Request request = new Request("GET", "/_security/_authenticate"); RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); options.addHeader("Authorization", "Bearer " + jwt.serialize()); @@ -768,7 +787,7 @@ private ThreadContext prepareThreadContext(SignedJWT signedJWT, String clientSec return threadContext; } - private static GrantApiKeyRequest getGrantApiKeyForJWT(SignedJWT signedJWT, String sharedSecret) { + static GrantApiKeyRequest getGrantApiKeyForJWT(SignedJWT signedJWT, String sharedSecret) { GrantApiKeyRequest grantApiKeyRequest = new GrantApiKeyRequest(); 
grantApiKeyRequest.getGrant().setType("access_token"); grantApiKeyRequest.getGrant().setAccessToken(new SecureString(signedJWT.serialize().toCharArray())); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRoleMappingsIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRoleMappingsIntegTests.java new file mode 100644 index 0000000000000..0a4a379e3a060 --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRoleMappingsIntegTests.java @@ -0,0 +1,483 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authc.jwt; + +import com.nimbusds.jwt.JWTClaimsSet; +import com.nimbusds.jwt.SignedJWT; + +import org.elasticsearch.client.Response; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.script.mustache.MustachePlugin; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import 
org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.rest.ESRestTestCase.entityAsMap; +import static org.elasticsearch.xpack.security.authc.jwt.JwtRealmSingleNodeTests.getAuthenticateRequest; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; + +public final class JwtRoleMappingsIntegTests extends SecurityIntegTestCase { + + private final String jwt0SharedSecret = "jwt0_shared_secret"; + private final String jwt1SharedSecret = "jwt1_shared_secret"; + private final String jwtHmacKey = "test-HMAC/secret passphrase-value"; + private static boolean anonymousRole; + + @BeforeClass + public static void beforeTests() { + anonymousRole = randomBoolean(); + } + + @Override + protected Collection> getMockPlugins() { + final ArrayList> plugins = new ArrayList<>(super.getMockPlugins()); + plugins.add(MustachePlugin.class); + return List.copyOf(plugins); + } + + @Before + private void clearRoleMappings() throws InterruptedException { + publishRoleMappings(Set.of()); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + Settings.Builder builder = Settings.builder() + .put(super.nodeSettings(nodeOrdinal, otherSettings)) + // some 
tests make use of cluster-state based role mappings + .put("xpack.security.authc.cluster_state_role_mappings.enabled", true) + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), randomBoolean()) + // 1st JWT realm + .put("xpack.security.authc.realms.jwt.jwt0.order", 10) + .put( + randomBoolean() + ? Settings.builder().put("xpack.security.authc.realms.jwt.jwt0.token_type", "id_token").build() + : Settings.EMPTY + ) + .put("xpack.security.authc.realms.jwt.jwt0.allowed_issuer", "my-issuer-01") + .put("xpack.security.authc.realms.jwt.jwt0.allowed_audiences", "es-01") + .put("xpack.security.authc.realms.jwt.jwt0.claims.principal", "sub") + .put("xpack.security.authc.realms.jwt.jwt0.claims.groups", "groups") + .put("xpack.security.authc.realms.jwt.jwt0.client_authentication.type", "shared_secret") + .putList("xpack.security.authc.realms.jwt.jwt0.allowed_signature_algorithms", "HS256", "HS384") + // 2nd JWT realm + .put("xpack.security.authc.realms.jwt.jwt1.order", 20) + .put("xpack.security.authc.realms.jwt.jwt1.token_type", "access_token") + .put("xpack.security.authc.realms.jwt.jwt1.allowed_issuer", "my-issuer-02") + .put("xpack.security.authc.realms.jwt.jwt1.allowed_subjects", "user-02") + .put("xpack.security.authc.realms.jwt.jwt1.allowed_audiences", "es-02") + .put("xpack.security.authc.realms.jwt.jwt1.fallback_claims.sub", "client_id") + .put("xpack.security.authc.realms.jwt.jwt1.claims.principal", "appId") + .put("xpack.security.authc.realms.jwt.jwt1.claims.groups", "groups") + .put("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "shared_secret") + .putList("xpack.security.authc.realms.jwt.jwt1.allowed_signature_algorithms", "HS256", "HS384"); + if (anonymousRole) { + builder.put("xpack.security.authc.anonymous.roles", "testAnonymousRole"); + } + SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { + secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.hmac_key", jwtHmacKey); + 
secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.client_authentication.shared_secret", jwt0SharedSecret); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.hmac_key", jwtHmacKey); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.client_authentication.shared_secret", jwt1SharedSecret); + }); + return builder.build(); + } + + @Override + protected boolean addMockHttpTransport() { + return false; + } + + @SuppressWarnings("unchecked") + public void testUsernameRoleMappingForJWT() throws Exception { + String username1 = "me"; + String username2 = "someoneElse"; + String roleName = randomAlphaOfLength(8); + // role mapping for username1 + ExpressionRoleMapping mapping1 = new ExpressionRoleMapping( + "test-username-expression", + new FieldExpression("username", List.of(new FieldExpression.FieldValue(username1))), + List.of(roleName), + List.of(), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping1)); + // JWT "id_token" valid for jwt0 + // jwt for username1 + SignedJWT username1Jwt = getSignedJWT( + new JWTClaimsSet.Builder().audience("es-01") + .issuer("my-issuer-01") + .subject(username1) + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(600))) + .build() + ); + // jwt for username2 + // JWT "id_token" valid for jwt0 + SignedJWT username2Jwt = getSignedJWT( + new JWTClaimsSet.Builder().audience("es-01") + .issuer("my-issuer-01") + .subject(username2) + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(600))) + .build() + ); + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(username1Jwt, jwt0SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo(roleName), 
equalTo("testAnonymousRole")) + ); + } else { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo(roleName))); + } + } + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(username2Jwt, jwt0SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + if (anonymousRole) { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("testAnonymousRole"))); + } else { + assertThat((List) authenticateResponseMap.get("roles"), emptyIterable()); + } + } + // role mapping for username2 + if (randomBoolean()) { + // overwrite the existing mapping for username1 to work for username2 instead + ExpressionRoleMapping mapping2 = new ExpressionRoleMapping( + "test-username-expression", + new FieldExpression("username", List.of(new FieldExpression.FieldValue(username2))), + List.of(roleName), + List.of(), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping2)); + } else { + // inactivate existing mapping for username1 + if (randomBoolean()) { + // disable + mapping1 = new ExpressionRoleMapping( + "test-username-expression", + new FieldExpression("username", List.of(new FieldExpression.FieldValue(username1))), + List.of(roleName), + List.of(), + Map.of(), + false + ); + } else { + // change incompatibly + mapping1 = new ExpressionRoleMapping( + "test-username-expression", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("WRONG"))), + List.of(roleName), + List.of(), + Map.of(), + true + ); + } + // add the new mapping for username2 + ExpressionRoleMapping mapping2 = new ExpressionRoleMapping( + "test-username-expression-2", + new FieldExpression("username", List.of(new FieldExpression.FieldValue(username2))), + List.of(roleName), + List.of(), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping1, mapping2)); + } + { + Response authenticateResponse = 
getRestClient().performRequest(getAuthenticateRequest(username1Jwt, jwt0SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + if (anonymousRole) { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("testAnonymousRole"))); + } else { + assertThat((List) authenticateResponseMap.get("roles"), emptyIterable()); + } + } + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(username2Jwt, jwt0SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo(roleName), equalTo("testAnonymousRole")) + ); + } else { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo(roleName))); + } + } + } + + @SuppressWarnings("unchecked") + public void testGroupsRoleMappingForJWT() throws Exception { + // JWT "access_token" valid for jwt2 + SignedJWT signedJWT = getSignedJWT( + new JWTClaimsSet.Builder().audience("es-02") + .issuer("my-issuer-02") + .subject("user-02") + .claim("groups", List.of("adminGroup", "superUserGroup")) + .claim("appId", "appIdSubject") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(300))) + .build() + ); + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(signedJWT, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + // no role mapping + if (anonymousRole) { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("testAnonymousRole"))); + } else { + assertThat((List) authenticateResponseMap.get("roles"), emptyIterable()); + } + } + 
RoleMapperExpression roleMapperExpression = new AnyExpression( + List.of( + new FieldExpression("groups", List.of(new FieldExpression.FieldValue("adminGroup"))), + new AllExpression( + List.of( + new FieldExpression("groups", List.of(new FieldExpression.FieldValue("superUserGroup"))), + new FieldExpression("metadata.jwt_claim_iss", List.of(new FieldExpression.FieldValue("WRONG"))) + ) + ) + ) + ); + ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "test-username-expression", + roleMapperExpression, + List.of("role1", "role2"), + List.of(), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping)); + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(signedJWT, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + // groups based role mapping + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo("role1"), equalTo("role2"), equalTo("testAnonymousRole")) + ); + } else { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("role1"), equalTo("role2"))); + } + } + // clear off all the role mappings + publishRoleMappings(Set.of()); + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(signedJWT, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + // no role mapping + if (anonymousRole) { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("testAnonymousRole"))); + } else { + assertThat((List) authenticateResponseMap.get("roles"), emptyIterable()); + } + } + // reinstate the same role mapping expression but with different roles + publishRoleMappings(Set.of()); + ExpressionRoleMapping mapping2 = new ExpressionRoleMapping( + 
"test-username-expression", + roleMapperExpression, + List.of("role3"), + List.of(), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping2)); + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(signedJWT, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo("testAnonymousRole"), equalTo("role3")) + ); + } else { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("role3"))); + } + } + } + + @SuppressWarnings("unchecked") + public void testRoleTemplatesMetadataForJWT() throws Exception { + SignedJWT jwt = getSignedJWT( + new JWTClaimsSet.Builder().audience("es-02") + .issuer("my-issuer-02") + .subject("user-02") + .claim("groups", List.of("adminGroup", "superUserGroup")) + .claim("appId", "testAppId") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(300))) + .build() + ); + RoleMapperExpression roleMapperExpression = new AnyExpression( + List.of( + new AllExpression( + List.of( + new FieldExpression( + "groups", + List.of(new FieldExpression.FieldValue("superUserGroup"), new FieldExpression.FieldValue("adminGroup")) + ), + new FieldExpression("metadata.jwt_claim_appId", List.of(new FieldExpression.FieldValue("testAppId"))) + ) + ) + ) + ); + TemplateRoleName templateRoleName = new TemplateRoleName(new BytesArray(""" + {"source":"[\\"{{metadata.jwt_claim_iss}}\\",\\"{{#join}}metadata.jwt_claim_aud{{/join}}\\"]"} + """), TemplateRoleName.Format.JSON); + ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "test-username-expression", + roleMapperExpression, + List.of(), + List.of(templateRoleName), + Map.of(), + true + ); + publishRoleMappings(Set.of(mapping)); + { + Response authenticateResponse = 
getRestClient().performRequest(getAuthenticateRequest(jwt, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + // no role mapping + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo("my-issuer-02"), equalTo("es-02"), equalTo("testAnonymousRole")) + ); + } else { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo("my-issuer-02"), equalTo("es-02")) + ); + } + } + ExpressionRoleMapping disabledMapping = new ExpressionRoleMapping( + "test-username-expression", + roleMapperExpression, + List.of(), + List.of(templateRoleName), + Map.of(), + false + ); + ExpressionRoleMapping anotherMapping = new ExpressionRoleMapping( + randomFrom("test-username-expression", "another-expression"), // name for the mapping is not important + new FieldExpression("username", List.of(new FieldExpression.FieldValue("testAppId"))), + List.of(), + List.of(new TemplateRoleName(new BytesArray(""" + {"source":"{{realm.name}}"}"""), TemplateRoleName.Format.STRING)), + Map.of(), + true + ); + // disabling or removing the mapping is equivalent + if (randomBoolean()) { + publishRoleMappings(Set.of(disabledMapping, anotherMapping)); + } else { + publishRoleMappings(Set.of(anotherMapping)); + } + { + Response authenticateResponse = getRestClient().performRequest(getAuthenticateRequest(jwt, jwt1SharedSecret)); + assertEquals(200, authenticateResponse.getStatusLine().getStatusCode()); + Map authenticateResponseMap = entityAsMap(authenticateResponse); + // no role mapping + if (anonymousRole) { + assertThat( + (List) authenticateResponseMap.get("roles"), + containsInAnyOrder(equalTo("jwt1"), equalTo("testAnonymousRole")) + ); + } else { + assertThat((List) authenticateResponseMap.get("roles"), containsInAnyOrder(equalTo("jwt1"))); + } + } + } + + private SignedJWT getSignedJWT(JWTClaimsSet 
claimsSet) throws Exception { + return JwtRealmSingleNodeTests.getSignedJWT(claimsSet, jwtHmacKey.getBytes(StandardCharsets.UTF_8)); + } + + private void publishRoleMappings(Set roleMappings) throws InterruptedException { + RoleMappingMetadata roleMappingMetadata = new RoleMappingMetadata(roleMappings); + List clusterServices = new ArrayList<>(); + internalCluster().getInstances(ClusterService.class).forEach(clusterServices::add); + CountDownLatch publishedClusterState = new CountDownLatch(clusterServices.size()); + for (ClusterService clusterService : clusterServices) { + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + RoleMappingMetadata publishedRoleMappingMetadata = RoleMappingMetadata.getFromClusterState(event.state()); + if (roleMappingMetadata.equals(publishedRoleMappingMetadata)) { + clusterService.removeListener(this); + publishedClusterState.countDown(); + } + } + }); + } + ClusterService masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + masterClusterService.submitUnbatchedStateUpdateTask("test-add-role-mapping", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return roleMappingMetadata.updateClusterState(currentState); + } + + @Override + public void onFailure(Exception e) { + fail(e); + for (int i = 0; i < clusterServices.size(); i++) { + publishedClusterState.countDown(); + } + } + }); + boolean awaitSuccessful = publishedClusterState.await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/DisableNativeRoleMappingsStoreTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/DisableNativeRoleMappingsStoreTests.java index 4f56d783e117c..27ceb8d6ed18c 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/DisableNativeRoleMappingsStoreTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/store/DisableNativeRoleMappingsStoreTests.java @@ -12,9 +12,7 @@ import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; @@ -26,8 +24,6 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import java.util.ArrayList; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; @@ -41,15 +37,15 @@ public class DisableNativeRoleMappingsStoreTests extends SecurityIntegTestCase { @Override - protected Collection> nodePlugins() { - List> plugins = new ArrayList<>(super.nodePlugins()); - plugins.add(PrivateCustomPlugin.class); - return plugins; + protected boolean addMockHttpTransport() { + return false; // need real http } @Override - protected boolean addMockHttpTransport() { - return false; // need real http + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + final Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); + builder.put("xpack.security.authc.native_role_mappings.enabled", "false"); + return builder.build(); } public void testPutRoleMappingDisallowed() { @@ -133,25 +129,4 @@ public void testResolveRoleMappings() throws Exception { nativeRoleMappingStore.resolveRoles(userData, future); 
assertThat(future.get(), emptyIterable()); } - - public static class PrivateCustomPlugin extends Plugin { - - public static final Setting NATIVE_ROLE_MAPPINGS_SETTING = Setting.boolSetting( - "xpack.security.authc.native_role_mappings.enabled", - true, - Setting.Property.NodeScope - ); - - public PrivateCustomPlugin() {} - - @Override - public Settings additionalSettings() { - return Settings.builder().put(NATIVE_ROLE_MAPPINGS_SETTING.getKey(), false).build(); - } - - @Override - public List> getSettings() { - return List.of(NATIVE_ROLE_MAPPINGS_SETTING); - } - } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 2e233f7beda76..ef08f855a46cc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -190,6 +190,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.Subject; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; @@ -292,6 +293,8 @@ import org.elasticsearch.xpack.security.authc.service.ServiceAccountService; import org.elasticsearch.xpack.security.authc.support.SecondaryAuthActions; import org.elasticsearch.xpack.security.authc.support.SecondaryAuthenticator; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; +import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; 
import org.elasticsearch.xpack.security.authz.AuthorizationDenialMessages; import org.elasticsearch.xpack.security.authz.AuthorizationService; @@ -764,6 +767,8 @@ Collection createComponents( systemIndices.getMainIndexManager(), scriptService ); + final ClusterStateRoleMapper clusterStateRoleMapper = new ClusterStateRoleMapper(settings, scriptService, clusterService); + final UserRoleMapper userRoleMapper = new CompositeRoleMapper(nativeRoleMappingStore, clusterStateRoleMapper); final AnonymousUser anonymousUser = new AnonymousUser(settings); components.add(anonymousUser); final ReservedRealm reservedRealm = new ReservedRealm(environment, settings, nativeUsersStore, anonymousUser, threadPool); @@ -772,7 +777,7 @@ Collection createComponents( client, clusterService, resourceWatcherService, - nativeRoleMappingStore + userRoleMapper ); Map realmFactories = new HashMap<>( InternalRealms.getFactories( @@ -781,7 +786,7 @@ Collection createComponents( resourceWatcherService, getSslService(), nativeUsersStore, - nativeRoleMappingStore, + userRoleMapper, systemIndices.getMainIndexManager() ) ); @@ -802,9 +807,10 @@ Collection createComponents( reservedRealm ); components.add(nativeUsersStore); - components.add(nativeRoleMappingStore); - components.add(realms); + components.add(new PluginComponentBinding<>(NativeRoleMappingStore.class, nativeRoleMappingStore)); + components.add(new PluginComponentBinding<>(UserRoleMapper.class, userRoleMapper)); components.add(reservedRealm); + components.add(realms); this.realms.set(realms); systemIndices.getMainIndexManager().addStateListener(nativeRoleMappingStore::onSecurityIndexStateChange); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java index 625f91d6f7749..c9c8f156cd5e7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java 
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java @@ -26,6 +26,7 @@ import org.elasticsearch.xpack.core.security.authc.oidc.OpenIdConnectRealmSettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.authc.esnative.NativeRealm; @@ -39,7 +40,6 @@ import org.elasticsearch.xpack.security.authc.pki.PkiRealm; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.support.RoleMappingFileBootstrapCheck; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.Collection; @@ -134,7 +134,7 @@ public static Map getFactories( ResourceWatcherService resourceWatcherService, SSLService sslService, NativeUsersStore nativeUsersStore, - NativeRoleMappingStore nativeRoleMappingStore, + UserRoleMapper userRoleMapper, SecurityIndexManager securityIndex ) { return Map.of( @@ -146,25 +146,25 @@ public static Map getFactories( config -> buildNativeRealm(threadPool, settings, nativeUsersStore, securityIndex, config), // active directory realm LdapRealmSettings.AD_TYPE, - config -> new LdapRealm(config, sslService, resourceWatcherService, nativeRoleMappingStore, threadPool), + config -> new LdapRealm(config, sslService, resourceWatcherService, userRoleMapper, threadPool), // LDAP realm LdapRealmSettings.LDAP_TYPE, - config -> new LdapRealm(config, sslService, resourceWatcherService, nativeRoleMappingStore, threadPool), + config -> new LdapRealm(config, sslService, resourceWatcherService, userRoleMapper, threadPool), // PKI realm PkiRealmSettings.TYPE, - config 
-> new PkiRealm(config, resourceWatcherService, nativeRoleMappingStore), + config -> new PkiRealm(config, resourceWatcherService, userRoleMapper), // SAML realm SamlRealmSettings.TYPE, - config -> SamlRealm.create(config, sslService, resourceWatcherService, nativeRoleMappingStore), + config -> SamlRealm.create(config, sslService, resourceWatcherService, userRoleMapper), // Kerberos realm KerberosRealmSettings.TYPE, - config -> new KerberosRealm(config, nativeRoleMappingStore, threadPool), + config -> new KerberosRealm(config, userRoleMapper, threadPool), // OpenID Connect realm OpenIdConnectRealmSettings.TYPE, - config -> new OpenIdConnectRealm(config, sslService, nativeRoleMappingStore, resourceWatcherService), + config -> new OpenIdConnectRealm(config, sslService, userRoleMapper, resourceWatcherService), // JWT realm JwtRealmSettings.TYPE, - config -> new JwtRealm(config, sslService, nativeRoleMappingStore) + config -> new JwtRealm(config, sslService, userRoleMapper) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java index d1cff736ef40c..6601d27d5a431 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.ietf.jgss.GSSException; import java.nio.file.Files; @@ -64,7 +63,7 @@ public final class KerberosRealm extends Realm implements CachingRealm { public static final String KRB_METADATA_UPN_KEY = 
"kerberos_user_principal_name"; private final Cache userPrincipalNameToUserCache; - private final NativeRoleMappingStore userRoleMapper; + private final UserRoleMapper userRoleMapper; private final KerberosTicketValidator kerberosTicketValidator; private final ThreadPool threadPool; private final Path keytabPath; @@ -72,20 +71,20 @@ public final class KerberosRealm extends Realm implements CachingRealm { private final boolean removeRealmName; private DelegatedAuthorizationSupport delegatedRealms; - public KerberosRealm(final RealmConfig config, final NativeRoleMappingStore nativeRoleMappingStore, final ThreadPool threadPool) { - this(config, nativeRoleMappingStore, new KerberosTicketValidator(), threadPool, null); + public KerberosRealm(final RealmConfig config, final UserRoleMapper userRoleMapper, final ThreadPool threadPool) { + this(config, userRoleMapper, new KerberosTicketValidator(), threadPool, null); } // pkg scoped for testing KerberosRealm( final RealmConfig config, - final NativeRoleMappingStore nativeRoleMappingStore, + final UserRoleMapper userRoleMapper, final KerberosTicketValidator kerberosTicketValidator, final ThreadPool threadPool, final Cache userPrincipalNameToUserCache ) { super(config); - this.userRoleMapper = nativeRoleMappingStore; + this.userRoleMapper = userRoleMapper; this.userRoleMapper.clearRealmCacheOnChange(this); final TimeValue ttl = config.getSetting(KerberosRealmSettings.CACHE_TTL_SETTING); if (ttl.getNanos() > 0) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index 0c66389253e74..1d3c3bf5f0a15 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -39,8 +39,8 @@ import 
org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.ReloadableSecurityComponent; import java.util.HashMap; @@ -72,13 +72,13 @@ public LdapRealm( RealmConfig config, SSLService sslService, ResourceWatcherService watcherService, - NativeRoleMappingStore nativeRoleMappingStore, + UserRoleMapper userRoleMapper, ThreadPool threadPool ) throws LDAPException { this( config, sessionFactory(config, sslService, threadPool), - new CompositeRoleMapper(config, watcherService, nativeRoleMappingStore), + new CompositeRoleMapper(new DnRoleMapper(config, watcherService), userRoleMapper), threadPool ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java index 785add149bc00..51d8323ef068b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java @@ -32,8 +32,8 @@ import org.elasticsearch.xpack.security.authc.BytesKey; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.support.DelegatedAuthorizationSupport; +import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.CompositeRoleMapper; -import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import 
java.security.MessageDigest; import java.security.cert.CertificateEncodingException; @@ -81,8 +81,8 @@ public class PkiRealm extends Realm implements CachingRealm { private DelegatedAuthorizationSupport delegatedRealms; private final boolean delegationEnabled; - public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, NativeRoleMappingStore nativeRoleMappingStore) { - this(config, new CompositeRoleMapper(config, watcherService, nativeRoleMappingStore)); + public PkiRealm(RealmConfig config, ResourceWatcherService watcherService, UserRoleMapper userRoleMapper) { + this(config, new CompositeRoleMapper(new DnRoleMapper(config, watcherService), userRoleMapper)); } // pkg private for testing diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java new file mode 100644 index 0000000000000..a31da43021c89 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; + +import java.util.Objects; +import java.util.Set; + +import static org.elasticsearch.xpack.core.security.SecurityExtension.SecurityComponents; + +/** + * A role mapper that reads the role mapping rules (i.e. {@link ExpressionRoleMapping}s) from the cluster state + * (i.e. {@link RoleMappingMetadata}). This is not enabled by default. + */ +public final class ClusterStateRoleMapper extends AbstractRoleMapperClearRealmCache implements ClusterStateListener { + + /** + * This setting is never registered by the xpack security plugin - in order to enable the + * cluster-state based role mapper another plugin must register it as a boolean setting + * and set it to `true`. + * If this setting is set to true then: + *
+ * <ul>
+ * <li>Realms that make use role mappings (all realms but file and native) will,
+ * in addition, observe the role mappings set in the cluster state.</li>
+ * <li>Similarly, xpack security's {@link SecurityComponents} extensions will,
+ * additionally, observe the cluster state role mappings too.</li>
+ * <li>{@link UserRoleMapper} class will be guice-bound to a {@link CompositeRoleMapper}
+ * of the {@link NativeRoleMappingStore} and this mapper.</li>
+ * </ul>
+ */ + public static final String CLUSTER_STATE_ROLE_MAPPINGS_ENABLED = "xpack.security.authc.cluster_state_role_mappings.enabled"; + private static final Logger logger = LogManager.getLogger(ClusterStateRoleMapper.class); + + private final ScriptService scriptService; + private final ClusterService clusterService; + private final boolean enabled; + + public ClusterStateRoleMapper(Settings settings, ScriptService scriptService, ClusterService clusterService) { + this.scriptService = scriptService; + this.clusterService = clusterService; + // this role mapper is disabled by default and only code in other plugins can enable it + this.enabled = settings.getAsBoolean(CLUSTER_STATE_ROLE_MAPPINGS_ENABLED, false); + if (this.enabled) { + clusterService.addListener(this); + } + } + + @Override + public void resolveRoles(UserData user, ActionListener> listener) { + listener.onResponse(ExpressionRoleMapping.resolveRoles(user, getMappings(), scriptService, logger)); + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + // The cluster state (which contains the new role mappings) is already applied when this listener is called, + // such that {@link #resolveRoles} will be returning the new role mappings when called after this is called + if (enabled + && false == Objects.equals( + RoleMappingMetadata.getFromClusterState(event.previousState()), + RoleMappingMetadata.getFromClusterState(event.state()) + )) { + // trigger realm cache clear, even if only disabled role mappings have changed + // ideally disabled role mappings should not be published in the cluster state + clearRealmCachesOnLocalNode(); + } + } + + private Set getMappings() { + if (enabled == false) { + return Set.of(); + } else { + return RoleMappingMetadata.getFromClusterState(clusterService.state()).getRoleMappings(); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java index 12b5ddc07786c..74966f07098a2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapper.java @@ -8,11 +8,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; -import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import java.util.ArrayList; import java.util.Arrays; @@ -27,17 +24,9 @@ */ public class CompositeRoleMapper implements UserRoleMapper { - private List delegates; + private final List delegates; - public CompositeRoleMapper( - RealmConfig realmConfig, - ResourceWatcherService watcherService, - NativeRoleMappingStore nativeRoleMappingStore - ) { - this(new DnRoleMapper(realmConfig, watcherService), nativeRoleMappingStore); - } - - private CompositeRoleMapper(UserRoleMapper... delegates) { + public CompositeRoleMapper(UserRoleMapper... 
delegates) { this.delegates = new ArrayList<>(Arrays.asList(delegates)); } @@ -57,5 +46,4 @@ public void resolveRoles(UserData user, ActionListener> listener) { public void clearRealmCacheOnChange(CachingRealm realm) { this.delegates.forEach(mapper -> mapper.clearRealmCacheOnChange(realm)); } - } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index cd1291f7379cb..7f35415d6f630 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -36,7 +36,6 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; -import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; @@ -49,7 +48,6 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; -import java.util.stream.Collectors; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.DELETED; @@ -399,18 +397,7 @@ public void onSecurityIndexStateChange(SecurityIndexManager.State previousState, @Override public void resolveRoles(UserData user, ActionListener> listener) { getRoleMappings(null, ActionListener.wrap(mappings -> { - final ExpressionModel model = user.asModel(); - final Set roles = mappings.stream() - 
.filter(ExpressionRoleMapping::isEnabled) - .filter(m -> m.getExpression().match(model)) - .flatMap(m -> { - final Set roleNames = m.getRoleNames(scriptService, model); - logger.trace("Applying role-mapping [{}] to user-model [{}] produced role-names [{}]", m.getName(), model, roleNames); - return roleNames.stream(); - }) - .collect(Collectors.toSet()); - logger.debug("Mapping user [{}] to roles [{}]", user, roles); - listener.onResponse(roles); + listener.onResponse(ExpressionRoleMapping.resolveRoles(user, mappings, scriptService, logger)); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java index 2d2fe2510d435..6d7817db8ec05 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; @@ -190,7 +191,8 @@ public Collection> nodePlugins() { InternalSettingsPlugin.class, MapperExtrasPlugin.class, MainRestPlugin.class, - Wildcard.class + Wildcard.class, + UnregisteredSecuritySettingsPlugin.class ); } @@ -390,4 +392,31 @@ private static Path resolveResourcePath(String resourcePathToStore) { public boolean isSslEnabled() { return sslEnabled; } + + // This plugin registers various normally unregistered settings such that dependent code can be tested. 
+ public static class UnregisteredSecuritySettingsPlugin extends Plugin { + + public static final Setting NATIVE_ROLE_MAPPINGS_SETTING = Setting.boolSetting( + "xpack.security.authc.native_role_mappings.enabled", + true, + Setting.Property.NodeScope + ); + public static final Setting CLUSTER_STATE_ROLE_MAPPINGS_ENABLED = Setting.boolSetting( + "xpack.security.authc.cluster_state_role_mappings.enabled", + false, + Setting.Property.NodeScope + ); + public static final Setting NATIVE_ROLES_ENABLED = Setting.boolSetting( + "xpack.security.authc.native_roles.enabled", + true, + Setting.Property.NodeScope + ); + + public UnregisteredSecuritySettingsPlugin() {} + + @Override + public List> getSettings() { + return List.of(NATIVE_ROLE_MAPPINGS_SETTING, CLUSTER_STATE_ROLE_MAPPINGS_ENABLED, NATIVE_ROLES_ENABLED); + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java index c3cf1e8dddc32..21d3467654154 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.PathUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.License; @@ -15,30 +16,44 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.InternalRealmsSettings; import org.elasticsearch.xpack.core.security.authc.Realm; import 
org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; +import org.elasticsearch.xpack.core.security.authc.kerberos.KerberosRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.LdapRealmSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; +import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.authc.saml.SamlRealm; +import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.Matchers; +import java.nio.file.Path; import java.util.Map; import java.util.Set; import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.any; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.ArgumentMatchers.isA; +import static 
org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -75,6 +90,120 @@ public void testNativeRealmRegistersIndexHealthChangeListener() throws Exception verify(securityIndex, times(2)).addStateListener(isA(BiConsumer.class)); } + public void testRealmsRegisterForRefreshAtRoleMapper() throws Exception { + UserRoleMapper userRoleMapper = mock(UserRoleMapper.class); + Map factories = InternalRealms.getFactories( + mock(ThreadPool.class), + Settings.EMPTY, + mock(ResourceWatcherService.class), + mock(SSLService.class), + mock(NativeUsersStore.class), + userRoleMapper, + mock(SecurityIndexManager.class) + ); + { + RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(LdapRealmSettings.AD_TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .put(RealmSettings.getFullSettingKey(realmId, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), "baseDN") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, hasEntry(is(LdapRealmSettings.AD_TYPE), any(Realm.Factory.class))); + var realm = factories.get(LdapRealmSettings.AD_TYPE).create(new RealmConfig(realmId, settings, env, threadContext)); + verify(userRoleMapper, times(1)).clearRealmCacheOnChange(same((CachingRealm) realm)); + } + { + RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(LdapRealmSettings.LDAP_TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .put(getFullSettingKey(realmId.getName(), LdapUserSearchSessionFactorySettings.SEARCH_BASE_DN), "userSearchBase") + .put(getFullSettingKey(realmId.getName(), 
LdapUserSearchSessionFactorySettings.POOL_ENABLED), false) + .put(RealmSettings.getFullSettingKey(realmId, SessionFactorySettings.URLS_SETTING), "ldap://127.1.1.1") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, hasEntry(is(LdapRealmSettings.LDAP_TYPE), any(Realm.Factory.class))); + var realm = factories.get(LdapRealmSettings.LDAP_TYPE).create(new RealmConfig(realmId, settings, env, threadContext)); + verify(userRoleMapper, times(1)).clearRealmCacheOnChange(same((CachingRealm) realm)); + } + { + RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(PkiRealmSettings.TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, hasEntry(is(PkiRealmSettings.TYPE), any(Realm.Factory.class))); + var realm = factories.get(PkiRealmSettings.TYPE).create(new RealmConfig(realmId, settings, env, threadContext)); + verify(userRoleMapper, times(1)).clearRealmCacheOnChange(same((CachingRealm) realm)); + } + final Path metadata = PathUtils.get(SamlRealm.class.getResource("idp1.xml").toURI()); + { + RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(KerberosRealmSettings.TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .put(getFullSettingKey(realmId.getName(), KerberosRealmSettings.HTTP_SERVICE_KEYTAB_PATH), metadata.toString()) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, 
hasEntry(is(KerberosRealmSettings.TYPE), any(Realm.Factory.class))); + var realm = factories.get(KerberosRealmSettings.TYPE).create(new RealmConfig(realmId, settings, env, threadContext)); + verify(userRoleMapper, times(1)).clearRealmCacheOnChange(same((CachingRealm) realm)); + } + { + RealmConfig.RealmIdentifier realmId = new RealmConfig.RealmIdentifier(JwtRealmSettings.TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLIENT_AUTHENTICATION_TYPE), "none") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.ALLOWED_ISSUER), "mock") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.ALLOWED_AUDIENCES), "mock") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLAIMS_PRINCIPAL.getClaim()), "principal") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLAIMS_GROUPS.getClaim()), "roles") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLAIMS_DN.getClaim()), "dn") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLAIMS_NAME.getClaim()), "name") + .put(getFullSettingKey(realmId.getName(), JwtRealmSettings.CLAIMS_MAIL.getClaim()), "mail") + .put( + getFullSettingKey(realmId.getName(), JwtRealmSettings.PKC_JWKSET_PATH), + getDataPath("/org/elasticsearch/xpack/security/authc/apikey/rsa-public-jwkset.json") + ) + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, hasEntry(is(JwtRealmSettings.TYPE), any(Realm.Factory.class))); + var realm = factories.get(JwtRealmSettings.TYPE).create(new RealmConfig(realmId, settings, env, threadContext)); + verify(userRoleMapper, times(1)).clearRealmCacheOnChange(same((CachingRealm) realm)); + } + { + RealmConfig.RealmIdentifier realmId = new 
RealmConfig.RealmIdentifier(SamlRealmSettings.TYPE, "test"); + Settings settings = Settings.builder() + .put("path.home", createTempDir()) + .put(RealmSettings.getFullSettingKey(realmId, RealmSettings.ORDER_SETTING), 0) + .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true) + .put(getFullSettingKey(realmId.getName(), SamlRealmSettings.IDP_METADATA_PATH), metadata.toString()) + .put(getFullSettingKey(realmId.getName(), SamlRealmSettings.IDP_ENTITY_ID), SamlRealmTests.TEST_IDP_ENTITY_ID) + .put(getFullSettingKey(realmId.getName(), SamlRealmSettings.SP_ENTITY_ID), "mock") + .put(getFullSettingKey(realmId.getName(), SamlRealmSettings.SP_ACS), "http://mock") + .put(getFullSettingKey(realmId.getName(), SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), "uid") + .build(); + final Environment env = TestEnvironment.newEnvironment(settings); + final ThreadContext threadContext = new ThreadContext(settings); + assertThat(factories, hasEntry(is(SamlRealmSettings.TYPE), any(Realm.Factory.class))); + try ( + SamlRealm ignored = (SamlRealm) factories.get(SamlRealmSettings.TYPE) + .create(new RealmConfig(realmId, settings, env, threadContext)) + ) { + // SAML realm is not caching + verifyNoMoreInteractions(userRoleMapper); + } + } + } + public void testLicenseLevels() { for (String type : InternalRealms.getConfigurableRealmsTypes()) { final LicensedFeature.Persistent feature = InternalRealms.getLicensedFeature(type); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RoleMappingMetadataTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RoleMappingMetadataTests.java new file mode 100644 index 0000000000000..a061106a979d7 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RoleMappingMetadataTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authc; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xpack.core.XPackClientPlugin; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.xpack.security.authc.support.mapper.ExpressionRoleMappingTests.randomRoleMapping; +import static org.hamcrest.Matchers.is; + +public class RoleMappingMetadataTests extends AbstractWireSerializingTestCase { + + @Override + protected RoleMappingMetadata createTestInstance() { + return new RoleMappingMetadata(randomSet(0, 3, () -> randomRoleMapping(true))); + } + + @Override + protected RoleMappingMetadata mutateInstance(RoleMappingMetadata instance) throws IOException { + Set mutatedRoleMappings = new HashSet<>(instance.getRoleMappings()); + boolean mutated = false; + if (mutatedRoleMappings.isEmpty() == false && randomBoolean()) { + mutated = true; + mutatedRoleMappings.remove(randomFrom(mutatedRoleMappings)); + } + if (randomBoolean() || mutated == false) { + mutatedRoleMappings.add(randomRoleMapping(true)); + } + return new 
RoleMappingMetadata(mutatedRoleMappings); + } + + @Override + protected Writeable.Reader instanceReader() { + return RoleMappingMetadata::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()); + } + + public void testSerializationBWC() throws IOException { + RoleMappingMetadata original = new RoleMappingMetadata(randomSet(0, 3, () -> randomRoleMapping(true))); + TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_2_0, null); + BytesStreamOutput output = new BytesStreamOutput(); + output.setTransportVersion(version); + original.writeTo(output); + StreamInput streamInput = new NamedWriteableAwareStreamInput( + ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + new NamedWriteableRegistry(new XPackClientPlugin().getNamedWriteables()) + ); + streamInput.setTransportVersion(version); + RoleMappingMetadata deserialized = new RoleMappingMetadata(streamInput); + assertEquals(original, deserialized); + } + + public void testEquals() { + Set roleMappings1 = randomSet(0, 3, () -> randomRoleMapping(true)); + Set roleMappings2 = randomSet(0, 3, () -> randomRoleMapping(true)); + assumeFalse("take 2 different role mappings", roleMappings1.equals(roleMappings2)); + assertThat(new RoleMappingMetadata(roleMappings1).equals(new RoleMappingMetadata(roleMappings2)), is(false)); + assertThat(new RoleMappingMetadata(roleMappings1).equals(new RoleMappingMetadata(roleMappings1)), is(true)); + assertThat(new RoleMappingMetadata(roleMappings2).equals(new RoleMappingMetadata(roleMappings2)), is(true)); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapperTests.java new file mode 100644 index 
0000000000000..7a9dd65f84c67 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapperTests.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; +import org.junit.Before; + +import java.util.Collections; +import java.util.Set; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.same; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; 
+import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class ClusterStateRoleMapperTests extends ESTestCase { + + private ScriptService scriptService; + private ClusterService clusterService; + private Settings enabledSettings; + private Settings disabledSettings; + + @Before + public void setup() { + scriptService = new ScriptService( + Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), + ScriptModule.CORE_CONTEXTS, + () -> 1L + ); + clusterService = mock(ClusterService.class); + enabledSettings = Settings.builder().put("xpack.security.authc.cluster_state_role_mappings.enabled", true).build(); + if (randomBoolean()) { + disabledSettings = Settings.builder().put("xpack.security.authc.cluster_state_role_mappings.enabled", false).build(); + } else { + // the cluster state role mapper is disabled by default + disabledSettings = Settings.EMPTY; + } + } + + public void testRegisterForClusterChangesIfEnabled() { + ClusterStateRoleMapper roleMapper = new ClusterStateRoleMapper(enabledSettings, scriptService, clusterService); + verify(clusterService, times(1)).addListener(same(roleMapper)); + } + + public void testNoRegisterForClusterChangesIfNotEnabled() { + new ClusterStateRoleMapper(disabledSettings, scriptService, clusterService); + verifyNoInteractions(clusterService); + } + + public void testRoleResolving() throws Exception { + UserRoleMapper.UserData userData = mock(UserRoleMapper.UserData.class); + ExpressionModel expressionModel = mock(ExpressionModel.class); + when(userData.asModel()).thenReturn(expressionModel); + ExpressionRoleMapping mapping1 = mockExpressionRoleMapping(false, Set.of("role1"), expressionModel); + ExpressionRoleMapping mapping2 = mockExpressionRoleMapping(true, Set.of("role2")); + ExpressionRoleMapping mapping3 = mockExpressionRoleMapping(true, Set.of("role3"), expressionModel); + 
RoleMappingMetadata roleMappingMetadata = new RoleMappingMetadata(Set.of(mapping1, mapping2, mapping3)); + ClusterState state = roleMappingMetadata.updateClusterState(ClusterState.builder(new ClusterName("elasticsearch")).build()); + when(clusterService.state()).thenReturn(state); + { + // the role mapper is enabled + ClusterStateRoleMapper roleMapper = new ClusterStateRoleMapper(enabledSettings, scriptService, clusterService); + PlainActionFuture> future = new PlainActionFuture<>(); + roleMapper.resolveRoles(userData, future); + Set roleNames = future.get(); + assertThat(roleNames, contains("role3")); + verify(mapping1).isEnabled(); + verify(mapping2).isEnabled(); + verify(mapping3).isEnabled(); + verify(mapping2).getExpression(); + verify(mapping3).getExpression(); + verify(mapping3).getRoleNames(same(scriptService), same(expressionModel)); + verifyNoMoreInteractions(mapping1, mapping2, mapping3); + } + { + // but if the role mapper is disabled, NO roles are resolved + ClusterStateRoleMapper roleMapper = new ClusterStateRoleMapper(disabledSettings, scriptService, clusterService); + PlainActionFuture> future = new PlainActionFuture<>(); + roleMapper.resolveRoles(userData, future); + Set roleNames = future.get(); + assertThat(roleNames, empty()); + verifyNoMoreInteractions(mapping1, mapping2, mapping3); + } + } + + public void testRoleMappingChangesTriggerRealmCacheClear() { + CachingRealm mockRealm = mock(CachingRealm.class); + ClusterStateRoleMapper roleMapper = new ClusterStateRoleMapper(enabledSettings, scriptService, clusterService); + roleMapper.clearRealmCacheOnChange(mockRealm); + ExpressionRoleMapping mapping1 = mockExpressionRoleMapping(true, Set.of("role"), mock(ExpressionModel.class)); + ExpressionModel model2 = mock(ExpressionModel.class); + ExpressionRoleMapping mapping2 = mockExpressionRoleMapping(true, Set.of("role"), model2); + ExpressionRoleMapping mapping3 = mockExpressionRoleMapping(true, Set.of("role3"), model2); + ClusterState emptyState = 
ClusterState.builder(new ClusterName("elasticsearch")).build(); + RoleMappingMetadata roleMappingMetadata1 = new RoleMappingMetadata(Set.of(mapping1)); + ClusterState state1 = roleMappingMetadata1.updateClusterState(emptyState); + roleMapper.clusterChanged(new ClusterChangedEvent("test", emptyState, state1)); + verify(mockRealm, times(1)).expireAll(); + RoleMappingMetadata roleMappingMetadata2 = new RoleMappingMetadata(Set.of(mapping2)); + ClusterState state2 = roleMappingMetadata2.updateClusterState(state1); + roleMapper.clusterChanged(new ClusterChangedEvent("test", state1, state2)); + verify(mockRealm, times(2)).expireAll(); + RoleMappingMetadata roleMappingMetadata3 = new RoleMappingMetadata(Set.of(mapping3)); + ClusterState state3 = roleMappingMetadata3.updateClusterState(state2); + roleMapper.clusterChanged(new ClusterChangedEvent("test", state2, state3)); + verify(mockRealm, times(3)).expireAll(); + RoleMappingMetadata roleMappingMetadata4 = new RoleMappingMetadata(Set.of(mapping2, mapping3)); + ClusterState state4 = roleMappingMetadata4.updateClusterState(state3); + roleMapper.clusterChanged(new ClusterChangedEvent("test", state3, state4)); + verify(mockRealm, times(4)).expireAll(); + } + + private ExpressionRoleMapping mockExpressionRoleMapping(boolean enabled, Set roleNames, ExpressionModel... 
matchingModels) { + ExpressionRoleMapping mapping = mock(ExpressionRoleMapping.class); + when(mapping.isEnabled()).thenReturn(enabled); + RoleMapperExpression roleMapperExpression = mock(RoleMapperExpression.class); + when(mapping.getExpression()).thenReturn(roleMapperExpression); + doAnswer(invocation -> { + ExpressionModel expressionModel = (ExpressionModel) invocation.getArguments()[0]; + for (ExpressionModel matchingModel : matchingModels) { + if (expressionModel.equals(matchingModel)) { + return true; + } + } + return false; + }).when(roleMapperExpression).match(any(ExpressionModel.class)); + doAnswer(invocation -> { + ExpressionModel expressionModel = (ExpressionModel) invocation.getArguments()[1]; + for (ExpressionModel matchingModel : matchingModels) { + if (expressionModel.equals(matchingModel)) { + return roleNames; + } + } + return Set.of(); + }).when(mapping).getRoleNames(same(scriptService), any(ExpressionModel.class)); + return mapping; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapperTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapperTests.java new file mode 100644 index 0000000000000..13cd1290eb43d --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/CompositeRoleMapperTests.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.authc.support.mapper; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.authc.support.CachingRealm; +import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class CompositeRoleMapperTests extends ESTestCase { + + public void testClearRealmCachePropagates() { + UserRoleMapper userRoleMapper1 = mock(UserRoleMapper.class); + UserRoleMapper userRoleMapper2 = mock(UserRoleMapper.class); + CompositeRoleMapper compositeRoleMapper = new CompositeRoleMapper(userRoleMapper1, userRoleMapper2); + CachingRealm realm = mock(CachingRealm.class); + compositeRoleMapper.clearRealmCacheOnChange(realm); + verify(userRoleMapper1, times(1)).clearRealmCacheOnChange(eq(realm)); + verify(userRoleMapper2, times(1)).clearRealmCacheOnChange(eq(realm)); + } + + public void testRolesResolveIsCumulative() throws Exception { + UserRoleMapper userRoleMapper1 = mock(UserRoleMapper.class); + Set roles1 = randomSet(0, 3, () -> randomAlphaOfLength(8)); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[1]; + listener.onResponse(roles1); + return null; + }).when(userRoleMapper1).resolveRoles(any(UserRoleMapper.UserData.class), anyActionListener()); + UserRoleMapper userRoleMapper2 = 
mock(UserRoleMapper.class); + Set roles2 = randomSet(0, 3, () -> randomAlphaOfLength(8)); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[1]; + listener.onResponse(roles2); + return null; + }).when(userRoleMapper2).resolveRoles(any(UserRoleMapper.UserData.class), anyActionListener()); + CompositeRoleMapper compositeRoleMapper = new CompositeRoleMapper(userRoleMapper1, userRoleMapper2); + PlainActionFuture> compositeResolvedRoles = new PlainActionFuture<>(); + compositeRoleMapper.resolveRoles(mock(UserRoleMapper.UserData.class), compositeResolvedRoles); + Set allResolvedRoles = new HashSet<>(); + allResolvedRoles.addAll(roles1); + allResolvedRoles.addAll(roles2); + assertThat(compositeResolvedRoles.get(), equalTo(allResolvedRoles)); + } + + public void testRolesResolveErrorPropagates() { + UserRoleMapper userRoleMapper1 = mock(UserRoleMapper.class); + Set roles1 = randomSet(0, 3, () -> randomAlphaOfLength(8)); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[1]; + if (randomBoolean()) { + listener.onResponse(roles1); + } else { + listener.onFailure(new Exception("test failure in role mapper 1")); + } + return null; + }).when(userRoleMapper1).resolveRoles(any(UserRoleMapper.UserData.class), anyActionListener()); + UserRoleMapper userRoleMapper2 = mock(UserRoleMapper.class); + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener> listener = (ActionListener>) invocationOnMock.getArguments()[1]; + listener.onFailure(new Exception("test failure in role mapper 2")); + return null; + }).when(userRoleMapper2).resolveRoles(any(UserRoleMapper.UserData.class), anyActionListener()); + CompositeRoleMapper compositeRoleMapper; + if (randomBoolean()) { + compositeRoleMapper = new CompositeRoleMapper(userRoleMapper1, userRoleMapper2); + } else { + 
compositeRoleMapper = new CompositeRoleMapper(userRoleMapper2, userRoleMapper1); + } + PlainActionFuture> compositeResolvedRoles = new PlainActionFuture<>(); + compositeRoleMapper.resolveRoles(mock(UserRoleMapper.UserData.class), compositeResolvedRoles); + expectThrows(ExecutionException.class, compositeResolvedRoles::get); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 6b675b61c2a6d..cfb85a133719f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -459,7 +459,7 @@ private ExpressionRoleMapping parse(String json, String name, boolean fromIndex) return mapping; } - private ExpressionRoleMapping randomRoleMapping(boolean acceptRoleTemplates) { + public static ExpressionRoleMapping randomRoleMapping(boolean acceptRoleTemplates) { final boolean useTemplate = acceptRoleTemplates && randomBoolean(); final List roles; final List templates; From b0f58ab3886c9740049a6a63020118cd783c8f5a Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Tue, 30 Apr 2024 14:53:04 +0200 Subject: [PATCH 049/244] ESQL: Move expression classes into common package (#105407) Move comparisons to the expression package, so that all expressions are in the same package. 
--- .../compute/operator/EvalBenchmark.java | 2 +- .../comparison/EqualsBoolsEvaluator.java | 2 +- .../comparison/EqualsDoublesEvaluator.java | 2 +- .../comparison/EqualsGeometriesEvaluator.java | 2 +- .../operator/comparison/EqualsIntsEvaluator.java | 2 +- .../comparison/EqualsKeywordsEvaluator.java | 2 +- .../comparison/EqualsLongsEvaluator.java | 2 +- .../comparison/GreaterThanDoublesEvaluator.java | 2 +- .../comparison/GreaterThanIntsEvaluator.java | 2 +- .../comparison/GreaterThanKeywordsEvaluator.java | 2 +- .../comparison/GreaterThanLongsEvaluator.java | 2 +- .../GreaterThanOrEqualDoublesEvaluator.java | 2 +- .../GreaterThanOrEqualIntsEvaluator.java | 2 +- .../GreaterThanOrEqualKeywordsEvaluator.java | 2 +- .../GreaterThanOrEqualLongsEvaluator.java | 2 +- .../InsensitiveEqualsConstantEvaluator.java | 2 +- .../comparison/InsensitiveEqualsEvaluator.java | 2 +- .../comparison/LessThanDoublesEvaluator.java | 2 +- .../comparison/LessThanIntsEvaluator.java | 2 +- .../comparison/LessThanKeywordsEvaluator.java | 2 +- .../comparison/LessThanLongsEvaluator.java | 2 +- .../LessThanOrEqualDoublesEvaluator.java | 2 +- .../comparison/LessThanOrEqualIntsEvaluator.java | 2 +- .../LessThanOrEqualKeywordsEvaluator.java | 2 +- .../LessThanOrEqualLongsEvaluator.java | 2 +- .../comparison/NotEqualsBoolsEvaluator.java | 2 +- .../comparison/NotEqualsDoublesEvaluator.java | 2 +- .../comparison/NotEqualsGeometriesEvaluator.java | 2 +- .../comparison/NotEqualsIntsEvaluator.java | 2 +- .../comparison/NotEqualsKeywordsEvaluator.java | 2 +- .../comparison/NotEqualsLongsEvaluator.java | 2 +- .../xpack/esql/analysis/Verifier.java | 4 ++-- .../xpack/esql/evaluator/EvalMapper.java | 2 +- .../predicate/operator/comparison/InMapper.java | 1 + .../predicate/operator/comparison/Equals.java | 2 +- .../comparison/EsqlBinaryComparison.java | 2 +- .../operator/comparison/GreaterThan.java | 2 +- .../operator/comparison/GreaterThanOrEqual.java | 2 +- .../comparison/InsensitiveBinaryComparison.java | 
2 +- .../operator/comparison/InsensitiveEquals.java | 2 +- .../comparison/InsensitiveEqualsMapper.java | 2 +- .../predicate/operator/comparison/LessThan.java | 2 +- .../operator/comparison/LessThanOrEqual.java | 2 +- .../predicate/operator/comparison/NotEquals.java | 2 +- .../xpack/esql/io/stream/PlanNamedTypes.java | 16 ++++++++-------- .../optimizer/LocalPhysicalPlanOptimizer.java | 6 +++--- .../esql/optimizer/LogicalPlanOptimizer.java | 2 +- .../xpack/esql/optimizer/OptimizerRules.java | 12 ++++++------ .../xpack/esql/parser/ExpressionBuilder.java | 12 ++++++------ .../esql/planner/EsqlExpressionTranslators.java | 14 +++++++------- .../operator/comparison/EqualsTests.java | 1 - .../comparison/EsqlBinaryComparisonTests.java | 4 ++-- .../comparison/GreaterThanOrEqualTests.java | 1 - .../operator/comparison/GreaterThanTests.java | 1 - .../comparison/InsensitiveEqualsTests.java | 2 +- .../comparison/LessThanOrEqualTests.java | 1 - .../operator/comparison/LessThanTests.java | 1 - .../operator/comparison/NotEqualsTests.java | 1 - .../esql/io/stream/PlanNamedTypesTests.java | 14 +++++++------- .../optimizer/LogicalPlanOptimizerTests.java | 13 ++++++------- .../esql/optimizer/OptimizerRulesTests.java | 12 ++++++------ .../optimizer/PhysicalPlanOptimizerTests.java | 10 +++++----- .../xpack/esql/parser/ExpressionTests.java | 8 ++++---- .../xpack/esql/parser/StatementParserTests.java | 10 +++++----- .../xpack/esql/planner/EvalMapperTests.java | 10 +++++----- 65 files changed, 117 insertions(+), 123 deletions(-) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EqualsBoolsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EqualsDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => 
expression}/predicate/operator/comparison/EqualsGeometriesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EqualsIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EqualsKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EqualsLongsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanLongsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java (98%) rename 
x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveEqualsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanLongsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEqualsBoolsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => 
expression}/predicate/operator/comparison/NotEqualsDoublesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEqualsIntsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEqualsLongsEvaluator.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/Equals.java (98%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EsqlBinaryComparison.java (99%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThan.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/GreaterThanOrEqual.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveBinaryComparison.java (91%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveEquals.java (96%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveEqualsMapper.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThan.java (97%) rename 
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/LessThanOrEqual.java (97%) rename x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/NotEquals.java (98%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/EsqlBinaryComparisonTests.java (93%) rename x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/{evaluator => expression}/predicate/operator/comparison/InsensitiveEqualsTests.java (98%) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java index 5b217efbe1ed1..6eeb58fe67bfd 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java @@ -23,12 +23,12 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.FieldAttribute; diff --git 
a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsBoolsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsBoolsEvaluator.java index ef26fb4d000dc..9344c46ac1bff 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsBoolsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsBoolsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsDoublesEvaluator.java index d5b2e84384a03..3281809a723f1 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsGeometriesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsGeometriesEvaluator.java index 025cca53ceab0..2d5f9daa78f20 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsGeometriesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsGeometriesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsIntsEvaluator.java index c2c9c7ce2b19c..0bd61aa346391 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsKeywordsEvaluator.java index 8dc15ba6d2fec..8ddbc4296d979 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsLongsEvaluator.java index 870d7c546010f..fcd98f27ea3ef 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EqualsLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanDoublesEvaluator.java index 051df8053417f..3f01ea2d34658 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanIntsEvaluator.java index c6de582ef2909..83f63a3b3ae5e 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java index cf243b68e473c..79e2516701bab 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanLongsEvaluator.java index 5f1a679c76a31..8dbf8c18a5047 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java index c36031c321422..b188682facb32 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java index 2b64cfcf9ea49..10141ec62d3b9 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java index b8b2c9b6d4459..977f9b0b955f4 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java index 907a29c8c904d..b80872fb6d72f 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java index 9417a2374ac12..dffbdb24efe19 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsConstantEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsEvaluator.java index 8832a77928aa3..bbb8cae36f9d8 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanDoublesEvaluator.java index c3cf8293071e3..ef3c955bb94f8 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanIntsEvaluator.java index a66ac0e889090..994723001823b 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanKeywordsEvaluator.java index a0951d9a09382..8e8360ad0677d 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanLongsEvaluator.java index f0e7ac134410b..5adf118e8c6eb 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java index cf12098962599..e296c5f6a5f1b 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java index ffa8ab38bc2eb..088a8629d681e 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java index 2e7aafeb2d805..fb388275db520 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java index 9c211610da814..64f3587d411be 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsBoolsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsBoolsEvaluator.java index 7d2067fe6bdbe..27cba7df19072 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsBoolsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsBoolsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsDoublesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsDoublesEvaluator.java index 174d3df53853b..a0f68648e03d1 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsDoublesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsDoublesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java index d0dd58e86babe..cf714564dbb9e 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsGeometriesEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsIntsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsIntsEvaluator.java index 03abc111d820e..ead28b80c3aea 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsIntsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsIntsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java index 919aeb4099b1f..aab45501e2c53 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsKeywordsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsLongsEvaluator.java similarity index 98% rename from x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java rename to x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsLongsEvaluator.java index 4ec694f918d97..4aab9a6ad9ec8 100644 --- a/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEqualsLongsEvaluator.java +++ b/x-pack/plugin/esql/src/main/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsLongsEvaluator.java @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License // 2.0; you may not use this file except in compliance with the Elastic License // 2.0. 
-package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import java.lang.IllegalArgumentException; import java.lang.Override; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index b318e7ed99bc0..688f15ed4fc39 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -7,11 +7,11 @@ package org.elasticsearch.xpack.esql.analysis; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 096dcc183eaf4..b94becc820671 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -23,7 +23,7 @@ import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; import 
org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEqualsMapper; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEqualsMapper; import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Attribute; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index cea88d3598c2f..430590e1cb240 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -18,6 +18,7 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.planner.Layout; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java index 
9cc10a555f288..c3eb0c064c5f2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/Equals.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java similarity index 99% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparison.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java index ff09255f5aef2..a3b50ba9bc6d6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparison.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparison.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java index 09fb32add0f18..4b3565005790c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java index 1bbc4128cd1dd..2878fb7d22105 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqual.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveBinaryComparison.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java similarity index 91% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveBinaryComparison.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java index 3f8030ee18f97..69ef6b4648c78 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveBinaryComparison.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveBinaryComparison.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java similarity index 96% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEquals.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java index ba0ebc5552cea..98cab93f1055d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEquals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEquals.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automaton; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsMapper.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java index 8fdacf72e811c..b773473d4c066 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsMapper.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automaton; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java index 1649706a643c3..88cef9acff6c4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThan.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java similarity index 97% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java index 19973a963c1c3..da15e6b78655e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqual.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java similarity index 98% rename from x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java rename to x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java index 42b31c9efaaf2..daedafee02c68 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEquals.java @@ -4,7 +4,7 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 71814e6e6ca59..a2699b18585e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -19,14 +19,6 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEquals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; @@ -136,7 +128,15 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import 
org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index eaf5395e55702..cc8d5b3c123f5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -11,13 +11,13 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveBinaryComparison; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules.OptimizerRule; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index c62a6dcfb4cff..07357ac0222e2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -15,13 +15,13 @@ import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.SurrogateExpression; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.grouping.GroupingFunction; import 
org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 19d9c5de8df46..36d25d80a3123 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -7,13 +7,13 @@ package org.elasticsearch.xpack.esql.optimizer; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java index 087ead8539d00..c7342325764d6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java @@ -17,12 +17,6 @@ import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEquals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; @@ -34,7 +28,13 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; 
import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.InvalidArgumentException; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 0afa6179fd3c8..757eb28a36074 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -12,16 +12,16 @@ import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.InsensitiveEquals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesUtils; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.InsensitiveEquals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.querydsl.query.SpatialRelatesQuery; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java index 0739cd4670c08..d5419d64be4b2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java similarity index 93% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java index 5e9e702ff8d12..ab62d6b37f798 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/EsqlBinaryComparisonTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EsqlBinaryComparisonTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison.BinaryComparisonOperation; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; import java.io.IOException; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index 2ccd6fd5b8b93..35b3979f99916 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index 43408396ea8d0..f8ee18ff90c6a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsTests.java similarity index 98% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsTests.java rename to x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsTests.java index e1fd214b63b66..e7d2fde05d49f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InsensitiveEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/InsensitiveEqualsTests.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.test.ESTestCase; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index ba2c52d8e873a..35da7b4dabf61 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index 62d59e5972caa..e5f77e15eece5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -11,7 +11,6 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java index ec5d2338adae2..8f9dbcbe9414a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java @@ -10,7 +10,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index e22fa3c66384b..66ff143c8fe48 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -17,13 +17,6 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.SerializationTestUtils; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.EsqlBinaryComparison; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; @@ -46,6 +39,13 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.EsqlBinaryComparison; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index a5aa897b8903f..e802ed8bcd522 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -18,12 +18,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; @@ -69,7 +63,13 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Dissect; @@ -140,7 +140,6 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.TestUtils.getFieldAttribute; -import static org.elasticsearch.xpack.ql.TestUtils.relation; import static org.elasticsearch.xpack.ql.expression.Literal.FALSE; import static org.elasticsearch.xpack.ql.expression.Literal.NULL; import static org.elasticsearch.xpack.ql.expression.Literal.TRUE; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java index 28944252191be..39b6d84812738 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRulesTests.java @@ -8,13 +8,13 @@ package org.elasticsearch.xpack.esql.optimizer; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.ql.TestUtils; import org.elasticsearch.xpack.ql.expression.Expression; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index a413a2e2d4f8e..82334afbffd03 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -31,11 +31,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.enrich.ResolvedEnrichPolicy; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.SpatialCentroid; @@ -47,6 +42,11 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialIntersects; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialRelatesFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.spatial.SpatialWithin; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index da58f4a1de183..2d2926cff2952 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -8,16 +8,16 @@ package org.elasticsearch.xpack.esql.parser; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.UnresolvedNamePattern; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Rename; import org.elasticsearch.xpack.ql.expression.Alias; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 8901f94cd2cf6..e994617011f51 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -12,14 +12,14 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.VerificationException; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike; import org.elasticsearch.xpack.esql.expression.function.scalar.string.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsqlAggregate; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 7f8124bec6895..05fdff8a75f80 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -19,11 +19,6 @@ import org.elasticsearch.xpack.esql.SerializationTestUtils; import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; -import 
org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; @@ -39,6 +34,11 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; From fc287bde8ba405ac2c81ee4663bdec577376f06c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 30 Apr 2024 14:54:15 +0100 Subject: [PATCH 050/244] Interpret `?timeout=-1` as infinite ack timeout (#107675) APIs which perform cluster state updates typically accept the `?master_timeout=` and `?timeout=` parameters to respectively set the pending task queue timeout and the acking timeout for the cluster state update. 
Both of these parameters accept the value `-1`, but `?master_timeout=-1` means to wait indefinitely whereas `?timeout=-1` means the same thing as `?timeout=0`, namely that acking times out immediately on commit. There are some situations where it makes sense to wait for as long as possible for nodes to ack a cluster state update. In practice this wait is bounded by other mechanisms (e.g. the lag detector will remove the node from the cluster after a couple of minutes of failing to apply cluster state updates) but these are not really the concern of clients. Therefore with this commit we change the meaning of `?timeout=-1` to mean that the acking timeout is infinite. --- docs/changelog/107675.yaml | 17 ++++++ docs/reference/rest-api/common-parms.asciidoc | 9 ++-- ...pdateDataStreamGlobalRetentionService.java | 5 +- .../admin/indices/create/CreateIndexIT.java | 54 +++++++++++++++++++ .../cluster/service/MasterService.java | 8 +++ .../cluster/service/MasterServiceTests.java | 51 ++++++++++++++++++ 6 files changed, 136 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/107675.yaml diff --git a/docs/changelog/107675.yaml b/docs/changelog/107675.yaml new file mode 100644 index 0000000000000..b1d51cd3f8538 --- /dev/null +++ b/docs/changelog/107675.yaml @@ -0,0 +1,17 @@ +pr: 107675 +summary: Interpret `?timeout=-1` as infinite ack timeout +area: Cluster Coordination +type: breaking +issues: [] +breaking: + title: Interpret `?timeout=-1` as infinite ack timeout + area: REST API + details: | + Today {es} accepts the parameter `?timeout=-1` in many APIs, but interprets + this to mean the same as `?timeout=0`. From 8.15 onwards `?timeout=-1` will + mean to wait indefinitely, aligning the behaviour of this parameter with + other similar parameters such as `?master_timeout`. 
+ impact: | + Use `?timeout=0` to force relevant operations to time out immediately + instead of `?timeout=-1` + notable: false diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index e63f66217d8d7..dd264c0e5bcd2 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1223,12 +1223,13 @@ the timeout expires, the request fails and returns an error. Defaults to `30s`. Can also be set to `-1` to indicate that the request should never timeout. end::master-timeout[] -tag::timeout[] `timeout`:: (Optional, <>) -Period to wait for a response. If no response is received before the timeout -expires, the request fails and returns an error. Defaults to `30s`. -end::timeout[] +Period to wait for a response from all relevant nodes in the cluster after +updating the cluster metadata. If no response is received before the timeout +expires, the cluster metadata update still applies but the response will +indicate that it was not completely acknowledged. Defaults to `30s`. +Can also be set to `-1` to indicate that the request should never timeout. end::timeoutparms[] tag::transform-id[] diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java index a906008c17742..4c54189ee0111 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/UpdateDataStreamGlobalRetentionService.java @@ -87,12 +87,9 @@ public void removeGlobalRetention( List affectedDataStreams, final ActionListener listener ) { - final var ackTimeout = request.masterNodeTimeout().millis() < 0 ? 
TimeValue.MAX_VALUE : request.masterNodeTimeout(); - // NB a negative master node timeout means never to time out, but a negative ack timeout means to time out immediately. - // TODO when https://github.com/elastic/elasticsearch/issues/107044 is fixed, we can just use request.masterNodeTimeout() directly taskQueue.submitTask( "remove-data-stream-global-retention", - new UpsertGlobalDataStreamMetadataTask(null, affectedDataStreams, listener, ackTimeout), + new UpsertGlobalDataStreamMetadataTask(null, affectedDataStreams, listener, request.masterNodeTimeout()), request.masterNodeTimeout() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index e0be40aeab18c..26a430123ccd9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -8,20 +8,28 @@ package org.elasticsearch.action.admin.indices.create; +import io.netty.handler.codec.http.HttpMethod; + import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; @@ -31,17 +39,24 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentFactory; +import java.io.IOException; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiFunction; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.rest.ESRestTestCase.entityAsMap; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -53,6 +68,11 @@ @ClusterScope(scope = Scope.TEST) public class CreateIndexIT extends ESIntegTestCase { + @Override + protected boolean addMockHttpTransport() { + return 
false; // expose HTTP requests + } + public void testCreationDateGivenFails() { try { prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_CREATION_DATE, 4L)).get(); @@ -370,4 +390,38 @@ public void testIndexNameInResponse() { assertEquals("Should have index name in response", "foo", response.index()); } + public void testInfiniteAckTimeout() throws IOException { + final var clusterService = internalCluster().getInstance(ClusterService.class); + final var barrier = new CyclicBarrier(2); + clusterService.getClusterApplierService().runOnApplierThread("block for test", Priority.NORMAL, cs -> { + safeAwait(barrier); + safeAwait(barrier); + }, ActionListener.noop()); + + safeAwait(barrier); + + final var request = ESRestTestCase.newXContentRequest( + HttpMethod.PUT, + "testindex", + (builder, params) -> builder.startObject("settings") + .field(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .field(SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1) + .endObject() + ); + request.addParameter("timeout", "-1"); + final var responseFuture = new PlainActionFuture(); + getRestClient().performRequestAsync(request, ActionTestUtils.wrapAsRestResponseListener(responseFuture)); + + if (randomBoolean()) { + safeSleep(scaledRandomIntBetween(1, 100)); + } + + assertFalse(responseFuture.isDone()); + safeAwait(barrier); + + final var response = FutureUtils.get(responseFuture, 10, TimeUnit.SECONDS); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertTrue((boolean) extractValue("acknowledged", entityAsMap(response))); + } + } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index a9f891e555f21..7f9720b64cca6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -711,6 +711,14 @@ public void 
onCommit(TimeValue commitTime) { assert false : "ackTimeout must always be present: " + contextPreservingAckListener; ackTimeout = TimeValue.ZERO; } + + if (ackTimeout.millis() < 0) { + if (countDown.countDown()) { + finish(); + } + return; + } + final TimeValue timeLeft = TimeValue.timeValueNanos(Math.max(0, ackTimeout.nanos() - commitTime.nanos())); if (timeLeft.nanos() == 0L) { onTimeout(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 57fb819ccd50e..50030143ec354 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -1663,6 +1663,57 @@ public void onAckTimeout() { deterministicTaskQueue.runAllTasksInTimeOrder(); safeAwait(latch); } + + // check that -1 means an infinite ack timeout + { + final CountDownLatch latch = new CountDownLatch(2); + + publisherRef.set((clusterChangedEvent, publishListener, ackListener) -> { + publishListener.onResponse(null); + ackListener.onCommit(TimeValue.timeValueMillis(randomLongBetween(0, TimeValue.timeValueDays(1).millis()))); + for (final var node : new DiscoveryNode[] { node1, node2, node3 }) { + deterministicTaskQueue.scheduleAt( + deterministicTaskQueue.getCurrentTimeMillis() + randomLongBetween(0, TimeValue.timeValueDays(1).millis()), + () -> ackListener.onNodeAck(node, null) + ); + } + }); + + masterService.submitUnbatchedStateUpdateTask( + "test2", + new AckedClusterStateUpdateTask(ackedRequest(TimeValue.MINUS_ONE, null), null) { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + 
assertTrue(acknowledged); + latch.countDown(); + return AcknowledgedResponse.TRUE; + } + + @Override + public void onFailure(Exception e) { + fail(); + } + + @Override + public void onAckTimeout() { + fail(); + } + } + ); + + deterministicTaskQueue.runAllTasks(); // NB not in time order, there's no timeout to avoid + safeAwait(latch); + } } } From 62f55be72d2052ef6d355033691791e3f952b7b6 Mon Sep 17 00:00:00 2001 From: Mark Tozzi Date: Tue, 30 Apr 2024 10:06:00 -0400 Subject: [PATCH 051/244] [ESQL] Remove unused NullEquals class (#108022) Although the ES|QL language doesn't support a null equals operator, we had a placeholder class for it with no @Evaluator methods defined. I presume this was to match the QL BinaryComparisonOperator enum, although I wasn't involved in the initial discussions around that choice. This PR removes the unused code, and associated references to it. --- .../operator/comparison/NullEquals.java | 48 ------------------- .../xpack/esql/optimizer/OptimizerRules.java | 4 +- .../planner/EsqlExpressionTranslators.java | 6 +-- .../esql/optimizer/OptimizerRulesTests.java | 38 --------------- 4 files changed, 3 insertions(+), 93 deletions(-) delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java deleted file mode 100644 index 4ab2d3fa8e7b9..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; - -import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; -import org.elasticsearch.xpack.ql.expression.Expression; -import org.elasticsearch.xpack.ql.expression.TypeResolutions; -import org.elasticsearch.xpack.ql.tree.NodeInfo; -import org.elasticsearch.xpack.ql.tree.Source; - -import java.time.ZoneId; - -import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; - -public class NullEquals extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals { - public NullEquals(Source source, Expression left, Expression right, ZoneId zoneId) { - super(source, left, right, zoneId); - } - - @Override - protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { - return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT); - } - - @Override - protected NodeInfo info() { - return NodeInfo.create(this, NullEquals::new, left(), right(), zoneId()); - } - - @Override - protected org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals replaceChildren( - Expression newLeft, - Expression newRight - ) { - return new NullEquals(source(), newLeft, newRight, zoneId()); - } - - @Override - public org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals swapLeftAndRight() { - return new NullEquals(source(), right(), left(), zoneId()); - } - -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java index 36d25d80a3123..3b7245b38ae4c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java +++ 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/OptimizerRules.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.EsRelation; @@ -333,8 +332,7 @@ private static Expression propagate(And and) { for (Expression ex : Predicates.splitAnd(and)) { if (ex instanceof Range) { ranges.add((Range) ex); - } else if (ex instanceof Equals || ex instanceof NullEquals) { - BinaryComparison otherEq = (BinaryComparison) ex; + } else if (ex instanceof Equals otherEq) { // equals on different values evaluate to FALSE // ignore date/time fields as equality comparison might actually be a range check if (otherEq.right().foldable() && DataTypes.isDateTime(otherEq.left().dataType()) == false) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java index 757eb28a36074..610c5dbb7dead 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlExpressionTranslators.java @@ -22,7 +22,6 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NotEquals; -import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.querydsl.query.SpatialRelatesQuery; import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; @@ -133,7 +132,6 @@ static Query translate(InsensitiveEquals bc) { *