From 968f99752a313edb241cd64162b456e7042784bb Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 25 Mar 2024 10:56:29 -0400 Subject: [PATCH 01/56] Bump OpenSearch version in README for open issues (#12895) Signed-off-by: Craig Perkins --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b5fc45509b002..748f8a366ecc8 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ [![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability") [![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues) [![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls) -[![2.10 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.10.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.10.0") -[![3.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") +[![2.14.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.14.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.14.0") +[![3.0.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0") [![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml) [![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml) [![GHA precommit](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/precommit.yml) From b98573787a270b5168705c8c00c3600d8f698ecc Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 25 Mar 2024 12:18:24 -0400 Subject: [PATCH 02/56] Update to Gradle 8.7 (#12444) Signed-off-by: Andriy Redko --- .../java/org/opensearch/gradle/test/GradleThreadsFilter.java | 4 +++- gradle/wrapper/gradle-wrapper.properties | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java index def5248c1f255..0ede465439400 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/GradleThreadsFilter.java @@ -45,6 +45,8 @@ public class GradleThreadsFilter implements ThreadFilter { public boolean reject(Thread t) { return t.getName().startsWith("Exec process") || t.getName().startsWith("Memory manager") - || t.getName().startsWith("File watcher consumer"); + || t.getName().startsWith("File watcher consumer") + || t.getName().startsWith("sshd-SshClient") /* Started by SshClient (sshd-core), part of 
SftpFileSystemProvider */ + || t.getName().startsWith("Thread-"); /* Started by AbstractFactoryManager (sshd-core), part of SftpFileSystemProvider */ } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 82a4add334a7d..9b0d73222260e 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=85719317abd2112f021d4f41f09ec370534ba288432065f4b477b6a3b652910d +distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0 From 52ae79d017a8e961cc1bc81fa7eafcc59689a447 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 25 Mar 2024 09:37:33 -0700 Subject: [PATCH 03/56] Disable MockTelemetryPlugin by default for integ tests (#12877) With this plugin enabled we see a significant increase in memory usage. This is due to a static map that collects Span entries per write/replication request. Tests with higher counts of independent writes are at risk of running out of memory (OOM) or experiencing slowdowns / warnings related to this. The plugin is disabled by default until this is resolved, given it is still gated behind a feature flag. Signed-off-by: Marc Handalian --- .../main/java/org/opensearch/test/OpenSearchIntegTestCase.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 7cb1b3f4fe0d8..664314245530e 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -2070,7 +2070,8 @@ protected boolean addMockGeoShapeFieldMapper() { * @return boolean. */ protected boolean addMockTelemetryPlugin() { - return true; + // setting to false until https://github.com/opensearch-project/OpenSearch/issues/12615 is resolved + return false; } /** From 70711cfde74a00249e323ccc4198db657b9d9303 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Mon, 25 Mar 2024 11:32:01 -0700 Subject: [PATCH 04/56] [DerivedFields] DerivedFieldScript and query execution logic (#12746) First in a series of commits to support derived fields, a form of schema-on-read. This commit adds: 1. DerivedFieldScript factory: This script factory will be used to execute scripts defined against derived fields of any type. 2. DerivedFieldValueFetcher: The value fetcher contains logic to execute the script and fetch the value in the form of a List. It expects DerivedFieldScript.LeafFactory as an input and sets the contract with the consumer to call setNextReader() whenever a segment is switched. 3. DerivedFieldQuery: This query will be used by any of the derived fields. It expects an input query and a DerivedFieldValueFetcher. It uses a two-phase iterator approach with the approximation iterator set to match all docs. On a match, it creates a Lucene MemoryIndex for a given doc, fetches the value of the derived field from _source using DerivedFieldValueFetcher, and executes the input query against it.
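For illustration, a minimal sketch of how the three pieces compose, modeled on the unit test added below in this patch; the variables factory, params, and searchLookup, and the field name "derived_ip", are hypothetical placeholders:

    // Obtain a per-segment script factory, wrap it in a value fetcher, and build the query.
    DerivedFieldScript.LeafFactory leafFactory = factory.newFactory(params, searchLookup); // factory is a DerivedFieldScript.Factory
    DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory);
    Query q = new DerivedFieldQuery(
        new TermQuery(new Term("derived_ip", "247.37.0.0")),             // query to run against the derived value
        valueFetcher,
        searchLookup,
        o -> new KeywordField("derived_ip", (String) o, Field.Store.NO), // IndexableField per fetched value
        Lucene.STANDARD_ANALYZER
    );

Each candidate doc is then verified by indexing its fetched value(s) into a transient MemoryIndex and running the inner query against it.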
--------- Signed-off-by: Rishabh Maurya --- .../test/painless/71_context_api.yml | 2 +- .../mapper/DerivedFieldValueFetcher.java | 45 ++++++ .../index/query/DerivedFieldQuery.java | 146 ++++++++++++++++++ .../opensearch/script/DerivedFieldScript.java | 104 +++++++++++++ .../org/opensearch/script/ScriptModule.java | 3 +- .../index/query/DerivedFieldQueryTests.java | 104 +++++++++++++ .../opensearch/script/MockScriptEngine.java | 15 ++ 7 files changed, 417 insertions(+), 2 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java create mode 100644 server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java create mode 100644 server/src/main/java/org/opensearch/script/DerivedFieldScript.java create mode 100644 server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml index 478ca9ae8abf4..20e6fd351a4b9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/71_context_api.yml @@ -2,7 +2,7 @@ - do: scripts_painless_context: {} - match: { contexts.0: aggregation_selector} - - match: { contexts.23: update} + - match: { contexts.24: update} --- "Action to get all API values for score context": diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java new file mode 100644 index 0000000000000..f3bf0c613415a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldValueFetcher.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.List; + +/** + * The value fetcher contains logic to execute the script and fetch the value in the form of a list of objects. + * It expects DerivedFieldScript.LeafFactory as an input and sets the contract with the consumer to call + * {@link #setNextReader(LeafReaderContext)} whenever a segment is switched. + */ +public final class DerivedFieldValueFetcher implements ValueFetcher { + private DerivedFieldScript derivedFieldScript; + private final DerivedFieldScript.LeafFactory derivedFieldScriptFactory; + + public DerivedFieldValueFetcher(DerivedFieldScript.LeafFactory derivedFieldScriptFactory) { + this.derivedFieldScriptFactory = derivedFieldScriptFactory; + } + + @Override + public List fetchValues(SourceLookup lookup) { + derivedFieldScript.setDocument(lookup.docId()); + // TODO: remove List.of() when derivedFieldScript.execute() returns list of objects.
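+ // setDocument() above binds the script's lookup to the current doc; execute() runs the script body and returns its value.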
+ return List.of(derivedFieldScript.execute()); + } + + public void setNextReader(LeafReaderContext context) { + try { + derivedFieldScript = derivedFieldScriptFactory.newInstance(context); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java new file mode 100644 index 0000000000000..8beef0bf46be0 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/DerivedFieldQuery.java @@ -0,0 +1,146 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +/** + * DerivedFieldQuery used for querying derived fields. It contains the logic to execute an input lucene query against + * DerivedField. It also accepts DerivedFieldValueFetcher and SearchLookup as an input. + */ +public final class DerivedFieldQuery extends Query { + private final Query query; + private final DerivedFieldValueFetcher valueFetcher; + private final SearchLookup searchLookup; + private final Function indexableFieldGenerator; + private final Analyzer indexAnalyzer; + + /** + * @param query lucene query to be executed against the derived field + * @param valueFetcher DerivedFieldValueFetcher ValueFetcher to fetch the value of a derived field from _source + * using LeafSearchLookup + * @param searchLookup SearchLookup to get the LeafSearchLookup look used by valueFetcher to fetch the _source + * @param indexableFieldGenerator used to generate lucene IndexableField from a given object fetched by valueFetcher + * to be used in lucene memory index. 
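+ * @param indexAnalyzer analyzer used when adding the generated IndexableField(s) to the per-document lucene memory index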
+ */ + public DerivedFieldQuery( + Query query, + DerivedFieldValueFetcher valueFetcher, + SearchLookup searchLookup, + Function indexableFieldGenerator, + Analyzer indexAnalyzer + ) { + this.query = query; + this.valueFetcher = valueFetcher; + this.searchLookup = searchLookup; + this.indexableFieldGenerator = indexableFieldGenerator; + this.indexAnalyzer = indexAnalyzer; + } + + @Override + public void visit(QueryVisitor visitor) { + query.visit(visitor); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + Query rewritten = indexSearcher.rewrite(query); + if (rewritten == query) { + return this; + } + return new DerivedFieldQuery(rewritten, valueFetcher, searchLookup, indexableFieldGenerator, indexAnalyzer); + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + + return new ConstantScoreWeight(this, boost) { + @Override + public Scorer scorer(LeafReaderContext context) { + DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc()); + valueFetcher.setNextReader(context); + LeafSearchLookup leafSearchLookup = searchLookup.getLeafSearchLookup(context); + TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { + @Override + public boolean matches() { + leafSearchLookup.source().setSegmentAndDocument(context, approximation.docID()); + List values = valueFetcher.fetchValues(leafSearchLookup.source()); + // TODO: in case of errors from script, should it be ignored and treated as missing field + // by using a configurable setting? + MemoryIndex memoryIndex = new MemoryIndex(); + for (Object value : values) { + memoryIndex.addField(indexableFieldGenerator.apply(value), indexAnalyzer); + } + float score = memoryIndex.search(query); + return score > 0.0f; + } + + @Override + public float matchCost() { + // TODO: how can we compute this? + return 1000f; + } + }; + return new ConstantScoreScorer(this, score(), scoreMode, twoPhase); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return false; + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (sameClassAs(o) == false) { + return false; + } + DerivedFieldQuery other = (DerivedFieldQuery) o; + return Objects.equals(this.query, other.query) + && Objects.equals(this.valueFetcher, other.valueFetcher) + && Objects.equals(this.searchLookup, other.searchLookup) + && Objects.equals(this.indexableFieldGenerator, other.indexableFieldGenerator) + && Objects.equals(this.indexAnalyzer, other.indexAnalyzer); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), query, valueFetcher, searchLookup, indexableFieldGenerator, indexAnalyzer); + } + + @Override + public String toString(String f) { + return "DerivedFieldQuery (Query: [ " + query.toString(f) + "])"; + } +} diff --git a/server/src/main/java/org/opensearch/script/DerivedFieldScript.java b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java new file mode 100644 index 0000000000000..7f5b991950ec6 --- /dev/null +++ b/server/src/main/java/org/opensearch/script/DerivedFieldScript.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.script; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.common.logging.DeprecationLogger; +import org.opensearch.index.fielddata.ScriptDocValues; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +/** + * Definition of Script for DerivedField. + * It will be used to execute scripts defined against derived fields of any type + * + * @opensearch.internal + */ +public abstract class DerivedFieldScript { + + public static final String[] PARAMETERS = {}; + public static final ScriptContext CONTEXT = new ScriptContext<>("derived_field", Factory.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); + + private static final Map> PARAMS_FUNCTIONS = Map.of( + "doc", + value -> value, + "_source", + value -> ((SourceLookup) value).loadSourceIfNeeded() + ); + + /** + * The generic runtime parameters for the script. + */ + private final Map params; + + /** + * A leaf lookup for the bound segment this script will operate on. + */ + private final LeafSearchLookup leafLookup; + + public DerivedFieldScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { + Map parameters = new HashMap<>(params); + this.leafLookup = lookup.getLeafSearchLookup(leafContext); + parameters.putAll(leafLookup.asMap()); + this.params = new DynamicMap(parameters, PARAMS_FUNCTIONS); + } + + protected DerivedFieldScript() { + params = null; + leafLookup = null; + } + + /** + * Return the parameters for this script. + */ + public Map getParams() { + return params; + } + + /** + * The doc lookup for the Lucene segment this script was created for. + */ + public Map> getDoc() { + return leafLookup.doc(); + } + + /** + * Set the current document to run the script on next. + */ + public void setDocument(int docid) { + leafLookup.setDocument(docid); + } + + public abstract Object execute(); + + /** + * A factory to construct {@link DerivedFieldScript} instances. + * + * @opensearch.internal + */ + public interface LeafFactory { + DerivedFieldScript newInstance(LeafReaderContext ctx) throws IOException; + } + + /** + * A factory to construct stateful {@link DerivedFieldScript} factories for a specific index. 
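+ * The returned {@link LeafFactory} is then used to create one {@link DerivedFieldScript} instance per segment via {@link LeafFactory#newInstance(LeafReaderContext)}.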
+ * + * @opensearch.internal + */ + public interface Factory extends ScriptFactory { + LeafFactory newFactory(Map params, SearchLookup lookup); + } +} diff --git a/server/src/main/java/org/opensearch/script/ScriptModule.java b/server/src/main/java/org/opensearch/script/ScriptModule.java index a192e9553016b..c83a6e64d53eb 100644 --- a/server/src/main/java/org/opensearch/script/ScriptModule.java +++ b/server/src/main/java/org/opensearch/script/ScriptModule.java @@ -78,7 +78,8 @@ public class ScriptModule { ScriptedMetricAggContexts.MapScript.CONTEXT, ScriptedMetricAggContexts.CombineScript.CONTEXT, ScriptedMetricAggContexts.ReduceScript.CONTEXT, - IntervalFilterScript.CONTEXT + IntervalFilterScript.CONTEXT, + DerivedFieldScript.CONTEXT ).collect(Collectors.toMap(c -> c.name, Function.identity())); } diff --git a/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java new file mode 100644 index 0000000000000..18d117fa8c0f5 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/DerivedFieldQueryTests.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.mapper.DerivedFieldValueFetcher; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.lookup.SourceLookup; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldQueryTests extends OpenSearchTestCase { + + private static final String[][] raw_requests = new String[][] { + { "40.135.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "40.135.0.0" }, + { "232.0.0.0 GET /images/hm_bg.jpg HTTP/1.0", "400", "232.0.0.0" }, + { "26.1.0.0 GET /images/hm_bg.jpg HTTP/1.0", "200", "26.1.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" }, + { "247.37.0.0 GET /french/splash_inet.html HTTP/1.0", "400", "247.37.0.0" } }; + + public void testDerivedField() throws IOException { + // Create lucene documents + List docs = new ArrayList<>(); + for (String[] request : raw_requests) { + Document document = new Document(); + document.add(new TextField("raw_request", request[0], Field.Store.YES)); + document.add(new KeywordField("status", request[1], Field.Store.YES)); + docs.add(document); + } + + // Mock SearchLookup + SearchLookup searchLookup = mock(SearchLookup.class); + SourceLookup sourceLookup = new 
SourceLookup(); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(leafLookup.source()).thenReturn(sourceLookup); + + // Mock DerivedFieldScript.Factory + DerivedFieldScript.Factory factory = (params, lookup) -> (DerivedFieldScript.LeafFactory) ctx -> { + when(searchLookup.getLeafSearchLookup(ctx)).thenReturn(leafLookup); + return new DerivedFieldScript(params, lookup, ctx) { + @Override + public Object execute() { + return raw_requests[sourceLookup.docId()][2]; + } + }; + }; + + // Create ValueFetcher from mocked DerivedFieldScript.Factory + DerivedFieldScript.LeafFactory leafFactory = factory.newFactory((new Script("")).getParams(), searchLookup); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(leafFactory); + + // Create DerivedFieldQuery + DerivedFieldQuery derivedFieldQuery = new DerivedFieldQuery( + new TermQuery(new Term("ip_from_raw_request", "247.37.0.0")), + valueFetcher, + searchLookup, + (o -> new KeywordField("ip_from_raw_request", (String) o, Field.Store.NO)), + Lucene.STANDARD_ANALYZER + ); + + // Index and Search + + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + for (Document d : docs) { + iw.addDocument(d); + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(derivedFieldQuery, 10); + assertEquals(2, topDocs.totalHits.value); + } + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java index 83b245a1bcecb..8c7e9718eb0cd 100644 --- a/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/opensearch/script/MockScriptEngine.java @@ -281,7 +281,22 @@ public double execute(Map params1, double[] values) { } else if (context.instanceClazz.equals(IntervalFilterScript.class)) { IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript; return context.factoryClazz.cast(factory); + } else if (context.instanceClazz.equals(DerivedFieldScript.class)) { + DerivedFieldScript.Factory factory = (derivedFieldsParams, lookup) -> ctx -> new DerivedFieldScript( + derivedFieldsParams, + lookup, + ctx + ) { + @Override + public Object execute() { + Map vars = new HashMap<>(derivedFieldsParams); + vars.put("params", derivedFieldsParams); + return script.apply(vars); + } + }; + return context.factoryClazz.cast(factory); } + ContextCompiler compiler = contexts.get(context); if (compiler != null) { return context.factoryClazz.cast(compiler.compile(script::apply, params)); From 12f1487cf8e3d28fa0ec99aba435099aeba904ad Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Mon, 25 Mar 2024 16:06:45 -0400 Subject: [PATCH 05/56] Fix issue with feature flags where default value may not be honored (#12849) * Fix issue with feature flags passed as system props where default value was not being honored Signed-off-by: Craig Perkins * Add CHANGELOG entry Signed-off-by: Craig Perkins * Add test for default value of false Signed-off-by: Craig Perkins * Fix issue when empty settings passed in initialization Signed-off-by: Craig Perkins * Get actual value from settings and default from ff setting Signed-off-by: Craig Perkins * Add test with non-empty setting initialization Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + 
.../opensearch/common/util/FeatureFlags.java | 82 ++++++++++++------- .../common/util/FeatureFlagTests.java | 34 ++++++++ 3 files changed, 89 insertions(+), 28 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 882a46d60a191..21b66d3efc5bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Removed ### Fixed +- Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) ### Security diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 8633cf1fe25ea..bdfce72d106d3 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -12,9 +12,11 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; +import java.util.List; + /** * Utility class to manage feature flags. Feature flags are system properties that must be set on the JVM. - * These are used to gate the visibility/availability of incomplete features. Fore more information, see + * These are used to gate the visibility/availability of incomplete features. For more information, see * https://featureflags.io/feature-flag-introduction/ * * @opensearch.internal @@ -65,11 +67,54 @@ public class FeatureFlags { */ public static final String PLUGGABLE_CACHE = "opensearch.experimental.feature.pluggable.caching.enabled"; + public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( + REMOTE_STORE_MIGRATION_EXPERIMENTAL, + false, + Property.NodeScope + ); + + public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); + + public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); + + public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); + + public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( + DATETIME_FORMATTER_CACHING, + true, + Property.NodeScope + ); + + public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( + WRITEABLE_REMOTE_INDEX, + false, + Property.NodeScope + ); + + public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); + + private static final List> ALL_FEATURE_FLAG_SETTINGS = List.of( + REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING, + EXTENSIONS_SETTING, + IDENTITY_SETTING, + TELEMETRY_SETTING, + DATETIME_FORMATTER_CACHING_SETTING, + WRITEABLE_REMOTE_INDEX_SETTING, + PLUGGABLE_CACHE_SETTING + ); /** * Should store the settings from opensearch.yml. */ private static Settings settings; + static { + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put(ffSetting.getKey(), ffSetting.getDefault(Settings.EMPTY)); + } + settings = settingsBuilder.build(); + } + /** * This method is responsible to map settings from opensearch.yml to local stored * settings value. That is used for the existing isEnabled method. @@ -77,7 +122,14 @@ public class FeatureFlags { * @param openSearchSettings The settings stored in opensearch.yml. 
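* Each flag value is read with the flag's registered default as the fallback, so flags absent from opensearch.yml keep their defaults.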
*/ public static void initializeFeatureFlags(Settings openSearchSettings) { - settings = openSearchSettings; + Settings.Builder settingsBuilder = Settings.builder(); + for (Setting ffSetting : ALL_FEATURE_FLAG_SETTINGS) { + settingsBuilder = settingsBuilder.put( + ffSetting.getKey(), + openSearchSettings.getAsBoolean(ffSetting.getKey(), ffSetting.getDefault(openSearchSettings)) + ); + } + settings = settingsBuilder.build(); } /** @@ -103,30 +155,4 @@ public static boolean isEnabled(Setting featureFlag) { return featureFlag.getDefault(Settings.EMPTY); } } - - public static final Setting REMOTE_STORE_MIGRATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - REMOTE_STORE_MIGRATION_EXPERIMENTAL, - false, - Property.NodeScope - ); - - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); - - public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); - - public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); - - public static final Setting DATETIME_FORMATTER_CACHING_SETTING = Setting.boolSetting( - DATETIME_FORMATTER_CACHING, - true, - Property.NodeScope - ); - - public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( - WRITEABLE_REMOTE_INDEX, - false, - Property.NodeScope - ); - - public static final Setting PLUGGABLE_CACHE_SETTING = Setting.boolSetting(PLUGGABLE_CACHE, false, Property.NodeScope); } diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index f175308482b15..88cb3782252b7 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -8,9 +8,14 @@ package org.opensearch.common.util; +import org.opensearch.common.settings.Settings; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchTestCase; +import static org.opensearch.common.util.FeatureFlags.DATETIME_FORMATTER_CACHING; +import static org.opensearch.common.util.FeatureFlags.EXTENSIONS; +import static org.opensearch.common.util.FeatureFlags.IDENTITY; + public class FeatureFlagTests extends OpenSearchTestCase { private final String FLAG_PREFIX = "opensearch.experimental.feature."; @@ -33,4 +38,33 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } + + public void testBooleanFeatureFlagWithDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagWithDefaultSetToFalse() { + final String testFlag = IDENTITY; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertFalse(FeatureFlags.isEnabled(testFlag)); + } + + public void testBooleanFeatureFlagInitializedWithEmptySettingsAndDefaultSetToTrue() { + final String testFlag = DATETIME_FORMATTER_CACHING; + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + assertNotNull(testFlag); + assertTrue(FeatureFlags.isEnabled(testFlag)); + } + + public void testInitializeFeatureFlagsWithExperimentalSettings() { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(IDENTITY, true).build()); + assertTrue(FeatureFlags.isEnabled(IDENTITY)); + assertTrue(FeatureFlags.isEnabled(DATETIME_FORMATTER_CACHING)); + 
assertFalse(FeatureFlags.isEnabled(EXTENSIONS)); + // reset FeatureFlags to defaults + FeatureFlags.initializeFeatureFlags(Settings.EMPTY); + } } From 7e99cac03872a5b0d8a8f68a572e34f55edb349f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 25 Mar 2024 16:31:40 -0400 Subject: [PATCH 06/56] Bump asm from 9.6 to 9.7 (#12908) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- modules/lang-expression/licenses/asm-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-9.7.jar.sha1 | 1 + modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 | 1 + modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 | 1 - modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 | 1 + modules/lang-painless/licenses/asm-util-9.6.jar.sha1 | 1 - modules/lang-painless/licenses/asm-util-9.7.jar.sha1 | 1 + 18 files changed, 10 insertions(+), 9 deletions(-) delete mode 100644 modules/lang-expression/licenses/asm-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-9.7.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 delete mode 100644 modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 create mode 100644 modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 delete mode 100644 modules/lang-painless/licenses/asm-util-9.6.jar.sha1 create mode 100644 modules/lang-painless/licenses/asm-util-9.7.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 21b66d3efc5bd..11772b7b79ef7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added ### Dependencies +- Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 8705588babe97..2a9fdb7cdddaf 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -14,7 +14,7 @@ icu4j = 70.1 supercsv = 2.4.0 log4j = 2.21.0 slf4j = 1.7.36 -asm = 9.6 +asm = 9.7 jettison = 1.5.4 woodstox = 6.4.0 kotlin = 1.7.10 diff --git a/modules/lang-expression/licenses/asm-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-9.6.jar.sha1 
deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-expression/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-expression/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-expression/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-expression/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-expression/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-expression/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-9.6.jar.sha1 deleted file mode 100644 index 2d9e6a9d3cfd6..0000000000000 --- a/modules/lang-painless/licenses/asm-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa205cf0a06dbd8e04ece91c0b37c3f5d567546a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-9.7.jar.sha1 new file mode 100644 index 0000000000000..84c9a9703af6d --- /dev/null +++ b/modules/lang-painless/licenses/asm-9.7.jar.sha1 @@ -0,0 +1 @@ +073d7b3086e14beb604ced229c302feff6449723 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 deleted file mode 100644 index fa42ea1198165..0000000000000 --- a/modules/lang-painless/licenses/asm-analysis-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ce6c7b174bd997fc2552dff47964546bd7a5ec3 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 new file mode 100644 index 0000000000000..c7687adfeb990 --- /dev/null +++ b/modules/lang-painless/licenses/asm-analysis-9.7.jar.sha1 @@ -0,0 +1 @@ +e4a258b7eb96107106c0599f0061cfc1832fe07a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 deleted file mode 100644 index a0814f495771f..0000000000000 --- a/modules/lang-painless/licenses/asm-commons-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-f1a9e5508eff490744144565c47326c8648be309 \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 new file mode 100644 index 0000000000000..1de4404e7d5d0 --- /dev/null +++ b/modules/lang-painless/licenses/asm-commons-9.7.jar.sha1 @@ -0,0 +1 @@ +e86dda4696d3c185fcc95d8d311904e7ce38a53f \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 deleted file mode 100644 index 101eb03b4b736..0000000000000 --- a/modules/lang-painless/licenses/asm-tree-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c0cdda9d211e965d2a4448aa3fd86110f2f8c2de \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 new file mode 100644 index 0000000000000..d4eeef6151272 --- /dev/null +++ b/modules/lang-painless/licenses/asm-tree-9.7.jar.sha1 @@ -0,0 +1 @@ +e446a17b175bfb733b87c5c2560ccb4e57d69f1a \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 deleted file mode 100644 index 1f42ac62dc69c..0000000000000 --- a/modules/lang-painless/licenses/asm-util-9.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77caf84eb93786a749b2baa40865b9613e3eaee \ No newline at end of file diff --git a/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 new file mode 100644 index 0000000000000..37c0d27efe46f --- /dev/null +++ b/modules/lang-painless/licenses/asm-util-9.7.jar.sha1 @@ -0,0 +1 @@ +c0655519f24d92af2202cb681cd7c1569df6ead6 \ No newline at end of file From 380ba0ebaade0e69fb1cf16306fede0213c60862 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 19:57:33 -0400 Subject: [PATCH 07/56] Bump org.apache.commons:commons-configuration2 from 2.10.0 to 2.10.1 in /plugins/repository-hdfs (#12896) * Bump org.apache.commons:commons-configuration2 Bumps org.apache.commons:commons-configuration2 from 2.10.0 to 2.10.1. --- updated-dependencies: - dependency-name: org.apache.commons:commons-configuration2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Peter Nied Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Peter Nied --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- .../licenses/commons-configuration2-2.10.0.jar.sha1 | 1 - .../licenses/commons-configuration2-2.10.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 11772b7b79ef7..17a5c2b538bc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added ### Dependencies +- Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) ### Changed diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index c1f94320f2681..af07fa9e5d80b 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -74,7 +74,7 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" - api 'org.apache.commons:commons-configuration2:2.10.0' + api 'org.apache.commons:commons-configuration2:2.10.1' api 'commons-io:commons-io:2.15.1' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 deleted file mode 100644 index 17d1b64781e5b..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b93eff3c83e5372262ed4996b609336305a810f \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 new file mode 100644 index 0000000000000..d4c0f8417d357 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 @@ -0,0 +1 @@ +2b681b3bcddeaa5bf5c2a2939cd77e2f9ad6efda \ No newline at end of file From 9aa7018690aefcac7588d8b0947a1c0f11be7a5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 19:58:05 -0400 Subject: [PATCH 08/56] Bump net.minidev:json-smart from 2.5.0 to 2.5.1 in /test/fixtures/hdfs-fixture (#12893) * Bump net.minidev:json-smart in /test/fixtures/hdfs-fixture Bumps [net.minidev:json-smart](https://github.com/netplex/json-smart-v2) from 2.5.0 to 2.5.1. - [Release notes](https://github.com/netplex/json-smart-v2/releases) - [Commits](https://github.com/netplex/json-smart-v2/compare/2.5.0...2.5.1) --- updated-dependencies: - dependency-name: net.minidev:json-smart dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Signed-off-by: Peter Nied Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Peter Nied --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17a5c2b538bc8..a60786b7cbcd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) +- Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 0ca4797bfeff1..a6275f200217a 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -62,7 +62,7 @@ dependencies { api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" - api 'net.minidev:json-smart:2.5.0' + api 'net.minidev:json-smart:2.5.1' api "org.mockito:mockito-core:${versions.mockito}" api "com.google.protobuf:protobuf-java:${versions.protobuf}" api "org.jetbrains.kotlin:kotlin-stdlib:${versions.kotlin}" From 3907ec9ba25cf620e0a7c465d351d21a4ea1c76a Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 26 Mar 2024 09:03:44 +0530 Subject: [PATCH 09/56] Refactor remote store flow to support any path type (#12822) Signed-off-by: Ashish Singh --- .../metadata/MetadataCreateIndexService.java | 24 ++-- .../org/opensearch/index/IndexService.java | 3 +- .../org/opensearch/index/IndexSettings.java | 9 ++ .../index/remote/RemoteStoreDataEnums.java | 65 ++++++++++ .../index/remote/RemoteStorePathType.java | 43 ++++++- ....java => RemoteStorePathTypeResolver.java} | 17 ++- .../opensearch/index/shard/IndexShard.java | 11 +- .../opensearch/index/shard/StoreRecovery.java | 4 +- .../store/RemoteSegmentStoreDirectory.java | 7 +- .../RemoteSegmentStoreDirectoryFactory.java | 35 ++++-- .../RemoteStoreLockManagerFactory.java | 38 ++---- .../index/translog/RemoteFsTranslog.java | 47 +++++--- .../transfer/TranslogTransferManager.java | 29 +++-- .../blobstore/BlobStoreRepository.java | 16 ++- .../opensearch/snapshots/RestoreService.java | 2 +- .../remote/RemoteStorePathTypeTests.java | 111 ++++++++++++++++++ .../RemoteSegmentStoreDirectoryTests.java | 16 ++- .../RemoteStoreLockManagerFactoryTests.java | 9 +- .../index/translog/RemoteFsTranslogTests.java | 4 +- .../TranslogTransferManagerTests.java | 38 ++++-- 20 files changed, 418 insertions(+), 110 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java rename server/src/main/java/org/opensearch/index/remote/{RemoteStorePathResolver.java => RemoteStorePathTypeResolver.java} (50%) create mode 100644 
server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index f6a14d8ec9d63..aee0473be95eb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,8 +88,8 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathResolver; import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathTypeResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -113,6 +113,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -170,7 +171,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathResolver remoteStorePathResolver; + private final RemoteStorePathTypeResolver remoteStorePathTypeResolver; public MetadataCreateIndexService( final Settings settings, @@ -203,8 +204,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); - remoteStorePathResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathResolver(clusterService.getClusterSettings()) + remoteStorePathTypeResolver = isRemoteDataAttributePresent(settings) + ? new RemoteStorePathTypeResolver(clusterService.getClusterSettings()) : null; } @@ -553,7 +554,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - addRemoteCustomData(tmpImdBuilder); + addRemoteStorePathTypeInCustomData(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -562,8 +563,14 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( return tempMetadata; } - public void addRemoteCustomData(IndexMetadata.Builder tmpImdBuilder) { - if (remoteStorePathResolver != null) { + /** + * Adds the remote store path type information in custom data of index metadata. + * + * @param tmpImdBuilder index metadata builder. + * @param assertNullOldType flag to verify that the old remote store path type is null + */ + public void addRemoteStorePathTypeInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) { + if (remoteStorePathTypeResolver != null) { // It is possible that remote custom data exists already. In such cases, we need to only update the path type // in the remote store custom data map. Map existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); @@ -571,8 +578,9 @@ public void addRemoteCustomData(IndexMetadata.Builder tmpImdBuilder) { ? 
new HashMap<>() : new HashMap<>(existingRemoteCustomData); // Determine the path type for use using the remoteStorePathResolver. - String newPathType = remoteStorePathResolver.resolveType().toString(); + String newPathType = remoteStorePathTypeResolver.getType().toString(); String oldPathType = remoteCustomData.put(RemoteStorePathType.NAME, newPathType); + assert !assertNullOldType || Objects.isNull(oldPathType); logger.trace(() -> new ParameterizedMessage("Added new path type {}, replaced old path type {}", newPathType, oldPathType)); tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 11dc4474cfa42..109bda65b1fd8 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -507,7 +507,8 @@ public synchronized IndexShard createShard( remoteDirectory = ((RemoteSegmentStoreDirectoryFactory) remoteDirectoryFactory).newDirectory( RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), this.indexSettings.getUUID(), - shardId + shardId, + this.indexSettings.getRemoteStorePathType() ); } remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7e49726c259cb..7e3d812974c79 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -48,6 +48,7 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; @@ -59,6 +60,7 @@ import java.util.Collections; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; @@ -1905,4 +1907,11 @@ public double getDocIdFuzzySetFalsePositiveProbability() { public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePositiveProbability) { this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } + + public RemoteStorePathType getRemoteStorePathType() { + Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); + return remoteCustomData != null && remoteCustomData.containsKey(RemoteStorePathType.NAME) + ? RemoteStorePathType.parseString(remoteCustomData.get(RemoteStorePathType.NAME)) + : RemoteStorePathType.FIXED; + } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java new file mode 100644 index 0000000000000..1afd73bf1f1b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import java.util.Set; + +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; + +/** + * This class contains the different enums related to remote store data categories and types. + * + * @opensearch.internal + */ +public class RemoteStoreDataEnums { + + /** + * Categories of the data in Remote store. + */ + public enum DataCategory { + SEGMENTS("segments", Set.of(DataType.values())), + TRANSLOG("translog", Set.of(DATA, METADATA)); + + private final String name; + private final Set supportedDataTypes; + + DataCategory(String name, Set supportedDataTypes) { + this.name = name; + this.supportedDataTypes = supportedDataTypes; + } + + public boolean isSupportedDataType(DataType dataType) { + return supportedDataTypes.contains(dataType); + } + + public String getName() { + return name; + } + } + + /** + * Types of data in remote store. + */ + public enum DataType { + DATA("data"), + METADATA("metadata"), + LOCK_FILES("lock_files"); + + private final String name; + + DataType(String name) { + this.name = name; + } + + public String getName() { + return name; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java index a64e07ab1f66f..d7d7a8cdfd644 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java @@ -8,6 +8,10 @@ package org.opensearch.index.remote; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; + import java.util.Locale; /** @@ -18,13 +22,46 @@ */ public enum RemoteStorePathType { - FIXED, - HASHED_PREFIX; + FIXED { + @Override + public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { + return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); + } + }, + HASHED_PREFIX { + @Override + public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { + // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. + // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. + return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); + } + }; + + /** + * @param basePath base path of the underlying blob store repository + * @param indexUUID of the index + * @param shardId shard id + * @param dataCategory is either translog or segment + * @param dataType can be one of data, metadata or lock_files. + * @return the blob path for the underlying remote store path type. 
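+ * For the FIXED type, for example, this resolves to basePath/indexUUID/shardId/dataCategory/dataType.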
+ */ + public BlobPath path(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { + assert dataCategory.isSupportedDataType(dataType) : "category:" + + dataCategory + + " type:" + + dataType + + " are not supported together"; + return generatePath(basePath, indexUUID, shardId, dataCategory.getName(), dataType.getName()); + } + + abstract BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType); public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { try { return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java similarity index 50% rename from server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java rename to server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java index 6e8126fcce0ca..5d014c9862d45 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java @@ -16,15 +16,20 @@ * * @opensearch.internal */ -public class RemoteStorePathResolver { +public class RemoteStorePathTypeResolver { - private final ClusterSettings clusterSettings; + private volatile RemoteStorePathType type; - public RemoteStorePathResolver(ClusterSettings clusterSettings) { - this.clusterSettings = clusterSettings; + public RemoteStorePathTypeResolver(ClusterSettings clusterSettings) { + type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } - public RemoteStorePathType resolveType() { - return clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); + public RemoteStorePathType getType() { + return type; + } + + public void setType(RemoteStorePathType type) { + this.type = type; } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 72ce858661031..1d7aa6ac4958b 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4932,7 +4932,7 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool()); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathType()); } /* @@ -4949,7 +4949,14 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = 
translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.download(repository, shardId, getThreadPool(), shardPath().resolveTranslog(), logger); + RemoteFsTranslog.download( + repository, + shardId, + getThreadPool(), + shardPath().resolveTranslog(), + indexSettings.getRemoteStorePathType(), + logger + ); } /** diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 5f09b1a0802f3..9779a2320d79f 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -58,6 +58,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineException; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; @@ -409,7 +410,8 @@ void recoverFromSnapshotAndRemoteStore( RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, - shardId + shardId, + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); sourceRemoteDirectory.initializeToSpecificCommit( primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index c9a238c6e3350..999625e0e579f 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -31,6 +31,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -897,13 +898,15 @@ public static void remoteDirectoryCleanup( RemoteSegmentStoreDirectoryFactory remoteDirectoryFactory, String remoteStoreRepoForIndex, String indexUUID, - ShardId shardId + ShardId shardId, + RemoteStorePathType pathType ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathType ); remoteSegmentStoreDirectory.deleteStaleSegments(0); remoteSegmentStoreDirectory.deleteIfEmpty(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index d6d3f1fca833c..f0ecd96bcf1f7 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -13,6 +13,7 @@ import org.opensearch.common.blobstore.BlobPath; import 
org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.IndexSettings;
+import org.opensearch.index.remote.RemoteStorePathType;
 import org.opensearch.index.shard.ShardPath;
 import org.opensearch.index.store.lockmanager.RemoteStoreLockManager;
 import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory;
@@ -24,8 +25,13 @@
 import org.opensearch.threadpool.ThreadPool;
 
 import java.io.IOException;
+import java.util.Objects;
 import java.util.function.Supplier;
 
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA;
+
 /**
  * Factory for a remote store directory
  *
@@ -33,8 +39,6 @@
 */
 @PublicApi(since = "2.3.0")
 public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.DirectoryFactory {
-    private static final String SEGMENTS = "segments";
-
     private final Supplier<RepositoriesService> repositoriesService;
 
     private final ThreadPool threadPool;
@@ -48,29 +52,38 @@ public RemoteSegmentStoreDirectoryFactory(Supplier<RepositoriesService> reposito
     public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException {
         String repositoryName = indexSettings.getRemoteStoreRepository();
         String indexUUID = indexSettings.getIndex().getUUID();
-        return newDirectory(repositoryName, indexUUID, path.getShardId());
+        return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathType());
     }
 
-    public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId) throws IOException {
+    public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathType pathType)
+        throws IOException {
+        assert Objects.nonNull(pathType);
         try (Repository repository = repositoriesService.get().repository(repositoryName)) {
+            assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository";
             BlobStoreRepository blobStoreRepository = ((BlobStoreRepository) repository);
-            BlobPath commonBlobPath = blobStoreRepository.basePath();
-            commonBlobPath = commonBlobPath.add(indexUUID).add(String.valueOf(shardId.id())).add(SEGMENTS);
+            BlobPath repositoryBasePath = blobStoreRepository.basePath();
+            String shardIdStr = String.valueOf(shardId.id());
+            // Derive the path for data directory of SEGMENTS
+            BlobPath dataPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, DATA);
             RemoteDirectory dataDirectory = new RemoteDirectory(
-                blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("data")),
+                blobStoreRepository.blobStore().blobContainer(dataPath),
                 blobStoreRepository::maybeRateLimitRemoteUploadTransfers,
                 blobStoreRepository::maybeRateLimitRemoteDownloadTransfers
             );
-            RemoteDirectory metadataDirectory = new RemoteDirectory(
-                blobStoreRepository.blobStore().blobContainer(commonBlobPath.add("metadata"))
-            );
+
+            // Derive the path for metadata directory of SEGMENTS
+            BlobPath mdPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, METADATA);
+            RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath));
+
+            // The path for lock is derived within the RemoteStoreLockManagerFactory
             RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager(
                 repositoriesService.get(),
                 repositoryName,
                 indexUUID,
-                String.valueOf(shardId.id())
+                shardIdStr,
+                pathType
             );
 
             return new
RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId);
diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java
index 00666ada11983..c033e4f7ad0aa 100644
--- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java
+++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java
@@ -11,15 +11,18 @@
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.index.remote.RemoteStorePathType;
 import org.opensearch.index.store.RemoteBufferedOutputDirectory;
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.repositories.Repository;
 import org.opensearch.repositories.RepositoryMissingException;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 
-import java.io.IOException;
 import java.util.function.Supplier;
 
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES;
+
 /**
  * Factory for remote store lock manager
  *
@@ -27,34 +30,29 @@
 */
 @PublicApi(since = "2.8.0")
 public class RemoteStoreLockManagerFactory {
-    private static final String SEGMENTS = "segments";
-    private static final String LOCK_FILES = "lock_files";
     private final Supplier<RepositoriesService> repositoriesService;
 
     public RemoteStoreLockManagerFactory(Supplier<RepositoriesService> repositoriesService) {
         this.repositoriesService = repositoriesService;
     }
 
-    public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException {
-        return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId);
+    public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId, RemoteStorePathType pathType) {
+        return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathType);
     }
 
     public static RemoteStoreMetadataLockManager newLockManager(
         RepositoriesService repositoriesService,
         String repositoryName,
         String indexUUID,
-        String shardId
-    ) throws IOException {
+        String shardId,
+        RemoteStorePathType pathType
+    ) {
         try (Repository repository = repositoriesService.repository(repositoryName)) {
             assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository";
-            BlobPath shardLevelBlobPath = ((BlobStoreRepository) repository).basePath().add(indexUUID).add(shardId).add(SEGMENTS);
-            RemoteBufferedOutputDirectory shardMDLockDirectory = createRemoteBufferedOutputDirectory(
-                repository,
-                shardLevelBlobPath,
-                LOCK_FILES
-            );
-
-            return new RemoteStoreMetadataLockManager(shardMDLockDirectory);
+            BlobPath repositoryBasePath = ((BlobStoreRepository) repository).basePath();
+            BlobPath lockDirectoryPath = pathType.path(repositoryBasePath, indexUUID, shardId, SEGMENTS, LOCK_FILES);
+            BlobContainer lockDirectoryBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(lockDirectoryPath);
+            return new RemoteStoreMetadataLockManager(new RemoteBufferedOutputDirectory(lockDirectoryBlobContainer));
         } catch (RepositoryMissingException e) {
             throw new IllegalArgumentException("Repository should be present to acquire/release lock", e);
         }
@@ -65,14 +63,4 @@ public static
RemoteStoreMetadataLockManager newLockManager(
     public Supplier<RepositoriesService> getRepositoriesService() {
         return repositoriesService;
     }
-
-    private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory(
-        Repository repository,
-        BlobPath commonBlobPath,
-        String extention
-    ) {
-        BlobPath extendedPath = commonBlobPath.add(extention);
-        BlobContainer dataBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(extendedPath);
-        return new RemoteBufferedOutputDirectory(dataBlobContainer);
-    }
 }
diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
index 43eec01b2d365..f0fb03cc905a4 100644
--- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
+++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java
@@ -11,6 +11,7 @@
 import org.apache.logging.log4j.Logger;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.SetOnce;
+import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.lease.Releasable;
 import org.opensearch.common.lease.Releasables;
 import org.opensearch.common.logging.Loggers;
@@ -18,6 +19,7 @@
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.core.util.FileSystemUtils;
+import org.opensearch.index.remote.RemoteStorePathType;
 import org.opensearch.index.remote.RemoteTranslogTransferTracker;
 import org.opensearch.index.translog.transfer.BlobStoreTransferService;
 import org.opensearch.index.translog.transfer.FileTransferTracker;
@@ -38,6 +40,7 @@
 import java.util.HashSet;
 import java.util.Locale;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
@@ -47,6 +50,10 @@
 import java.util.function.LongConsumer;
 import java.util.function.LongSupplier;
 
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA;
+
 /**
  * A Translog implementation which syncs local FS with a remote store
  * The current impl uploads translog, ckp and metadata to remote store
@@ -74,7 +81,6 @@ public class RemoteFsTranslog extends Translog {
 
     private static final int REMOTE_DELETION_PERMITS = 2;
     private static final int DOWNLOAD_RETRIES = 2;
-    public static final String TRANSLOG = "translog";
 
     // Semaphore used to allow only single remote generation to happen at a time
     private final Semaphore remoteGenerationDeletionPermits = new Semaphore(REMOTE_DELETION_PERMITS);
@@ -106,7 +112,8 @@ public RemoteFsTranslog(
             threadPool,
             shardId,
             fileTransferTracker,
-            remoteTranslogTransferTracker
+            remoteTranslogTransferTracker,
+            indexSettings().getRemoteStorePathType()
         );
         try {
             download(translogTransferManager, location, logger);
@@ -150,8 +157,14 @@ RemoteTranslogTransferTracker getRemoteTranslogTracker() {
         return remoteTranslogTransferTracker;
     }
 
-    public static void download(Repository repository, ShardId shardId, ThreadPool threadPool, Path location, Logger logger)
-        throws IOException {
+    public static void download(
+        Repository repository,
+        ShardId shardId,
+        ThreadPool threadPool,
+        Path location,
+        RemoteStorePathType pathType,
+        Logger logger
+    ) throws IOException {
         assert repository instanceof BlobStoreRepository : String.format(
             Locale.ROOT,
             "%s
repository should be instance of BlobStoreRepository", @@ -167,7 +180,8 @@ public static void download(Repository repository, ShardId shardId, ThreadPool t threadPool, shardId, fileTransferTracker, - remoteTranslogTransferTracker + remoteTranslogTransferTracker, + pathType ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -244,15 +258,16 @@ public static TranslogTransferManager buildTranslogTransferManager( ThreadPool threadPool, ShardId shardId, FileTransferTracker fileTransferTracker, - RemoteTranslogTransferTracker remoteTranslogTransferTracker + RemoteTranslogTransferTracker tracker, + RemoteStorePathType pathType ) { - return new TranslogTransferManager( - shardId, - new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool), - blobStoreRepository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG), - fileTransferTracker, - remoteTranslogTransferTracker - ); + assert Objects.nonNull(pathType); + String indexUUID = shardId.getIndex().getUUID(); + String shardIdStr = String.valueOf(shardId.id()); + BlobPath dataPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, DATA); + BlobPath mdPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, METADATA); + BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); + return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); } @Override @@ -524,7 +539,8 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathType pathType) + throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; // We use a dummy stats tracker to ensure the flow doesn't break. 
@@ -536,7 +552,8 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th
             threadPool,
             shardId,
             fileTransferTracker,
-            remoteTranslogTransferTracker
+            remoteTranslogTransferTracker,
+            pathType
         );
         // clean up all remote translog files
         translogTransferManager.deleteTranslogFiles();
diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
index 2f6055df87804..c9e07ca3ef8c1 100644
--- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
+++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java
@@ -58,7 +58,6 @@ public class TranslogTransferManager {
     private final TransferService transferService;
     private final BlobPath remoteDataTransferPath;
     private final BlobPath remoteMetadataTransferPath;
-    private final BlobPath remoteBaseTransferPath;
     private final FileTransferTracker fileTransferTracker;
     private final RemoteTranslogTransferTracker remoteTranslogTransferTracker;
@@ -67,8 +66,6 @@ public class TranslogTransferManager {
     private static final int METADATA_FILES_TO_FETCH = 10;
 
     private final Logger logger;
-    private final static String METADATA_DIR = "metadata";
-    private final static String DATA_DIR = "data";
 
     private static final VersionedCodecStreamWrapper<TranslogTransferMetadata> metadataStreamWrapper = new VersionedCodecStreamWrapper<>(
         new TranslogTransferMetadataHandler(),
@@ -79,15 +76,15 @@ public class TranslogTransferManager {
     public TranslogTransferManager(
         ShardId shardId,
         TransferService transferService,
-        BlobPath remoteBaseTransferPath,
+        BlobPath remoteDataTransferPath,
+        BlobPath remoteMetadataTransferPath,
         FileTransferTracker fileTransferTracker,
         RemoteTranslogTransferTracker remoteTranslogTransferTracker
     ) {
         this.shardId = shardId;
         this.transferService = transferService;
-        this.remoteBaseTransferPath = remoteBaseTransferPath;
-        this.remoteDataTransferPath = remoteBaseTransferPath.add(DATA_DIR);
-        this.remoteMetadataTransferPath = remoteBaseTransferPath.add(METADATA_DIR);
+        this.remoteDataTransferPath = remoteDataTransferPath;
+        this.remoteMetadataTransferPath = remoteMetadataTransferPath;
         this.fileTransferTracker = fileTransferTracker;
         this.logger = Loggers.getLogger(getClass(), shardId);
         this.remoteTranslogTransferTracker = remoteTranslogTransferTracker;
@@ -456,17 +453,27 @@ public void onFailure(Exception e) {
         );
     }
 
+    /**
+     * Deletes all the translog content related to the underlying shard.
+     */
     public void delete() {
-        // cleans up all the translog contents in async fashion
-        transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, remoteBaseTransferPath, new ActionListener<>() {
+        // Delete the translog data content from the remote store.
+        delete(remoteDataTransferPath);
+        // Delete the translog metadata content from the remote store.
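+        // Data and metadata now live under separate remote paths, so each is purged with its own async delete.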
+ delete(remoteMetadataTransferPath); + } + + private void delete(BlobPath path) { + // cleans up all the translog contents in async fashion for the given path + transferService.deleteAsync(ThreadPool.Names.REMOTE_PURGE, path, new ActionListener<>() { @Override public void onResponse(Void unused) { - logger.info("Deleted all remote translog data"); + logger.info("Deleted all remote translog data at path={}", path); } @Override public void onFailure(Exception e) { - logger.error("Exception occurred while cleaning translog", e); + logger.error(new ParameterizedMessage("Exception occurred while cleaning translog at path={}", path), e); } }); } diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 076173177feee..a7c2fb03285b0 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -108,6 +108,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStorePathType; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -669,7 +670,8 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, - String.valueOf(shardId.shardId()) + String.valueOf(shardId.shardId()), + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManger.cloneLock( FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), @@ -1107,7 +1109,8 @@ public static void remoteDirectoryCleanupAsync( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - String threadPoolName + String threadPoolName, + RemoteStorePathType pathType ) { threadpool.executor(threadPoolName) .execute( @@ -1116,7 +1119,8 @@ public static void remoteDirectoryCleanupAsync( remoteDirectoryFactory, remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + pathType ), indexUUID, shardId @@ -1147,7 +1151,8 @@ protected void releaseRemoteStoreLockAndCleanup( RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepoForIndex, indexUUID, - shardId + shardId, + RemoteStorePathType.HASHED_PREFIX // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); @@ -1169,7 +1174,8 @@ protected void releaseRemoteStoreLockAndCleanup( remoteStoreRepoForIndex, indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), - ThreadPool.Names.REMOTE_PURGE + ThreadPool.Names.REMOTE_PURGE, + RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java 
b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index e5ac604e0a5e3..da2a36cbb0701 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -451,7 +451,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteCustomData(indexMdBuilder); + createIndexService.addRemoteStorePathTypeInCustomData(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java new file mode 100644 index 0000000000000..0f108d1b45f5a --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java @@ -0,0 +1,111 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStorePathType.FIXED; +import static org.opensearch.index.remote.RemoteStorePathType.parseString; + +public class RemoteStorePathTypeTests extends OpenSearchTestCase { + + private static final String SEPARATOR = "/"; + + public void testParseString() { + // Case 1 - Pass values from the enum. 
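+        // parseString upper-cases its input with Locale.ROOT, so the lower-cased variants below resolve to the same constants.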
+        String typeString = FIXED.toString();
+        RemoteStorePathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT)));
+        assertEquals(FIXED, type);
+
+        typeString = RemoteStorePathType.HASHED_PREFIX.toString();
+        type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT)));
+        assertEquals(RemoteStorePathType.HASHED_PREFIX, type);
+
+        // Case 2 - Pass random string
+        String randomTypeString = randomAlphaOfLength(2);
+        IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> parseString(randomTypeString));
+        assertEquals("Could not parse RemoteStorePathType for [" + randomTypeString + "]", ex.getMessage());
+
+        // Case 3 - Null string
+        ex = assertThrows(IllegalArgumentException.class, () -> parseString(null));
+        assertEquals("Could not parse RemoteStorePathType for [null]", ex.getMessage());
+    }
+
+    public void testGeneratePathForFixedType() {
+        BlobPath blobPath = new BlobPath();
+        List<String> pathList = getPathList();
+        for (String path : pathList) {
+            blobPath = blobPath.add(path);
+        }
+
+        String indexUUID = randomAlphaOfLength(10);
+        String shardId = String.valueOf(randomInt(100));
+        DataCategory dataCategory = TRANSLOG;
+        DataType dataType = DATA;
+
+        String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId + SEPARATOR;
+        // Translog Data
+        BlobPath result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType);
+        assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString());
+
+        // Translog Metadata
+        dataType = METADATA;
+        result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType);
+        assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString());
+
+        // Translog Lock files - This is a negative case where the assertion will trip.
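+        // (TRANSLOG supports only the DATA and METADATA types, so LOCK_FILES fails the isSupportedDataType assertion in path().)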
+        BlobPath finalBlobPath = blobPath;
+        assertThrows(AssertionError.class, () -> FIXED.path(finalBlobPath, indexUUID, shardId, TRANSLOG, LOCK_FILES));
+
+        // Segment Data
+        dataCategory = SEGMENTS;
+        dataType = DATA;
+        result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType);
+        assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString());
+
+        // Segment Metadata
+        dataType = METADATA;
+        result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType);
+        assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString());
+
+        // Segment Lock files
+        dataType = LOCK_FILES;
+        result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType);
+        assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString());
+    }
+
+    private List<String> getPathList() {
+        List<String> pathList = new ArrayList<>();
+        int length = randomIntBetween(0, 5);
+        for (int i = 0; i < length; i++) {
+            pathList.add(randomAlphaOfLength(randomIntBetween(2, 5)));
+        }
+        return pathList;
+    }
+
+    private String getPath(List<String> pathList) {
+        String p = String.join(SEPARATOR, pathList);
+        if (p.isEmpty() || p.endsWith(SEPARATOR)) {
+            return p;
+        }
+        return p + SEPARATOR;
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index 8b69c15dac9d3..59c8a9d92f07b 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -37,6 +37,7 @@
 import org.opensearch.core.index.Index;
 import org.opensearch.core.index.shard.ShardId;
 import org.opensearch.index.engine.NRTReplicationEngineFactory;
+import org.opensearch.index.remote.RemoteStorePathType;
 import org.opensearch.index.remote.RemoteStoreUtils;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardTestCase;
@@ -697,13 +698,20 @@ public void testCleanupAsync() throws Exception {
             threadPool,
             indexShard.shardId()
         );
-        when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any())).thenReturn(remoteSegmentDirectory);
+        when(remoteSegmentStoreDirectoryFactory.newDirectory(any(), any(), any(), any())).thenReturn(remoteSegmentDirectory);
         String repositoryName = "test-repository";
         String indexUUID = "test-idx-uuid";
         ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0"));
-
-        RemoteSegmentStoreDirectory.remoteDirectoryCleanup(remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId);
-        verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId);
+        RemoteStorePathType pathType = randomFrom(RemoteStorePathType.values());
+
+        RemoteSegmentStoreDirectory.remoteDirectoryCleanup(
+            remoteSegmentStoreDirectoryFactory,
+            repositoryName,
+            indexUUID,
+            shardId,
+            pathType
+        );
+        verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathType);
         verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE);
         verify(remoteMetadataDirectory).delete();
         verify(remoteDataDirectory).delete();
diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java
index
897785849cf7b..0fe5557737447 100644
--- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java
@@ -11,6 +11,7 @@
 import org.opensearch.common.blobstore.BlobContainer;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStore;
+import org.opensearch.index.remote.RemoteStorePathType;
 import org.opensearch.repositories.RepositoriesService;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.test.OpenSearchTestCase;
@@ -48,6 +49,7 @@ public void testNewLockManager() throws IOException {
         String testRepository = "testRepository";
         String testIndexUUID = "testIndexUUID";
         String testShardId = "testShardId";
+        RemoteStorePathType pathType = RemoteStorePathType.FIXED;
 
         BlobStoreRepository repository = mock(BlobStoreRepository.class);
         BlobStore blobStore = mock(BlobStore.class);
@@ -59,7 +61,12 @@ public void testNewLockManager() throws IOException {
 
         when(repositoriesService.repository(testRepository)).thenReturn(repository);
 
-        RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager(testRepository, testIndexUUID, testShardId);
+        RemoteStoreLockManager lockManager = remoteStoreLockManagerFactory.newLockManager(
+            testRepository,
+            testIndexUUID,
+            testShardId,
+            pathType
+        );
 
         assertTrue(lockManager != null);
 
         ArgumentCaptor<BlobPath> blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class);
diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
index 7ff4c3ecf5236..9f72d3c7bd825 100644
--- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java
@@ -99,7 +99,7 @@
 
 import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
 import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING;
-import static org.opensearch.index.translog.RemoteFsTranslog.TRANSLOG;
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG;
 import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
 import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
 import static org.hamcrest.Matchers.contains;
@@ -907,7 +907,7 @@ public void testDrainSync() throws Exception {
     }
 
     private BlobPath getTranslogDirectory() {
-        return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG);
+        return repository.basePath().add(shardId.getIndex().getUUID()).add(String.valueOf(shardId.id())).add(TRANSLOG.getName());
     }
 
     private Long populateTranslogOps(boolean withMissingOps) throws IOException {
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
index e34bc078896f9..a9502dc051428 100644
--- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
+++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java
@@ -48,6 +48,8 @@
 
 import org.mockito.Mockito;
 
+import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG;
+import static
org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; @@ -95,7 +97,8 @@ public void setUp() throws Exception { translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -159,7 +162,8 @@ public void onFailure(TransferFileSnapshot fileSnapshot, Exception e) { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -194,7 +198,8 @@ public void testTransferSnapshotOnUploadTimeout() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -235,7 +240,8 @@ public void testTransferSnapshotOnThreadInterrupt() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), fileTransferTracker, remoteTranslogTransferTracker ); @@ -333,7 +339,8 @@ public void testReadMetadataNoFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -354,7 +361,8 @@ public void testReadMetadataFile() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -390,7 +398,8 @@ public void testReadMetadataReadException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -426,7 +435,8 @@ public void testReadMetadataListException() throws IOException { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, transferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), null, remoteTranslogTransferTracker ); @@ -499,7 +509,8 @@ public void testDeleteTranslogSuccess() throws Exception { TranslogTransferManager translogTransferManager = new TranslogTransferManager( shardId, blobStoreTransferService, - remoteBaseTransferPath, + remoteBaseTransferPath.add(TRANSLOG.getName()), + remoteBaseTransferPath.add(METADATA.getName()), tracker, remoteTranslogTransferTracker ); @@ -518,7 +529,8 @@ public void testDeleteStaleTranslogMetadata() { 
TranslogTransferManager translogTransferManager = new TranslogTransferManager(
             shardId,
             transferService,
-            remoteBaseTransferPath,
+            remoteBaseTransferPath.add(TRANSLOG.getName()),
+            remoteBaseTransferPath.add(METADATA.getName()),
             null,
             remoteTranslogTransferTracker
         );
@@ -569,7 +581,8 @@ public void testDeleteTranslogFailure() throws Exception {
         TranslogTransferManager translogTransferManager = new TranslogTransferManager(
             shardId,
             blobStoreTransferService,
-            remoteBaseTransferPath,
+            remoteBaseTransferPath.add(TRANSLOG.getName()),
+            remoteBaseTransferPath.add(METADATA.getName()),
             tracker,
             remoteTranslogTransferTracker
         );
@@ -612,7 +625,8 @@ public void testMetadataConflict() throws InterruptedException {
         TranslogTransferManager translogTransferManager = new TranslogTransferManager(
             shardId,
             transferService,
-            remoteBaseTransferPath,
+            remoteBaseTransferPath.add(TRANSLOG.getName()),
+            remoteBaseTransferPath.add(METADATA.getName()),
             null,
             remoteTranslogTransferTracker
         );

From f6d6fd3564653be3e8f32cfcdb2c3ef5a45de584 Mon Sep 17 00:00:00 2001
From: Ashish
Date: Tue, 26 Mar 2024 11:39:45 +0530
Subject: [PATCH 10/56] Add missed API visibility annotations for public APIs
 (#12920)

Signed-off-by: Ashish Singh
---
 .../main/java/org/opensearch/common/blobstore/BlobPath.java | 4 +++-
 .../org/opensearch/index/remote/RemoteStoreDataEnums.java   | 6 +++++-
 .../org/opensearch/index/remote/RemoteStorePathType.java    | 2 ++
 3 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
index c54536e9c46e2..763594ed52977 100644
--- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
+++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java
@@ -33,6 +33,7 @@
 package org.opensearch.common.blobstore;
 
 import org.opensearch.common.Nullable;
+import org.opensearch.common.annotation.PublicApi;
 
 import java.util.ArrayList;
 import java.util.Collections;
@@ -42,8 +43,9 @@
 /**
  * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
+@PublicApi(since = "1.0.0")
 public class BlobPath implements Iterable<String> {
 
     private static final String SEPARATOR = "/";
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java
index 1afd73bf1f1b3..475e29004ba2e 100644
--- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java
+++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java
@@ -8,6 +8,8 @@
 
 package org.opensearch.index.remote;
 
+import org.opensearch.common.annotation.PublicApi;
+
 import java.util.Set;
 
 import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA;
@@ -16,13 +18,14 @@
 /**
  * This class contains the different enums related to remote store data categories and types.
  *
- * @opensearch.internal
+ * @opensearch.api
 */
 public class RemoteStoreDataEnums {
 
     /**
     * Categories of the data in Remote store.
     */
+    @PublicApi(since = "2.14.0")
     public enum DataCategory {
         SEGMENTS("segments", Set.of(DataType.values())),
         TRANSLOG("translog", Set.of(DATA, METADATA));
@@ -47,6 +50,7 @@ public String getName() {
 
     /**
     * Types of data in remote store.
*/ + @PublicApi(since = "2.14.0") public enum DataType { DATA("data"), METADATA("metadata"), diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java index d7d7a8cdfd644..742d7b501f227 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java @@ -8,6 +8,7 @@ package org.opensearch.index.remote; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; @@ -20,6 +21,7 @@ * * @opensearch.internal */ +@PublicApi(since = "2.14.0") public enum RemoteStorePathType { FIXED { From 3b10a06276287b493f4713e464fce543f738f4c4 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 26 Mar 2024 22:31:57 +0800 Subject: [PATCH 11/56] Fix flaky tests in CopyProcessorTests (#12885) * Fix flaky test CopyProcessorTests#testCopyWithRemoveSource Signed-off-by: Gao Binlong * Remove an existing field to get a non-existing field name Signed-off-by: Gao Binlong * Add a new randomFieldName method Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- .../opensearch/ingest/common/CopyProcessorTests.java | 5 +++-- .../org/opensearch/ingest/RandomDocumentPicks.java | 11 +++++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java index 3259ba85ef340..b53ce2db994a8 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -24,7 +24,7 @@ public class CopyProcessorTests extends OpenSearchTestCase { public void testCopyExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); @@ -71,7 +71,8 @@ public void testCopyWithIgnoreMissing() throws Exception { public void testCopyWithRemoveSource() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); - String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + String targetFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); diff --git a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java index c478bf9239f74..0e42787b16be8 100644 --- 
a/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java +++ b/test/framework/src/main/java/org/opensearch/ingest/RandomDocumentPicks.java @@ -71,6 +71,17 @@ public static String randomFieldName(Random random) { return fieldName.toString(); } + /** + * Returns a random field name that doesn't exist in the document. + */ + public static String randomNonExistingFieldName(Random random, IngestDocument ingestDocument) { + String fieldName; + do { + fieldName = randomFieldName(random); + } while (canAddField(fieldName, ingestDocument) == false); + return fieldName; + } + /** * Returns a random leaf field name. */ From a4d6d55ec78cde53c80c3e0626904d594bd518db Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 26 Mar 2024 14:08:49 -0400 Subject: [PATCH 12/56] Bump netty from 4.1.107.Final to 4.1.108.Final (#12924) Signed-off-by: Andriy Redko --- CHANGELOG.md | 3 ++- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 
- .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.107.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.108.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.107.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.108.Final.jar.sha1 | 1 + 90 files changed, 47 insertions(+), 46 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 
modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 create mode 100644 
plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a60786b7cbcd7..03aa8b8ec7f45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -108,6 +108,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) +- Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) @@ -122,4 +123,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 2a9fdb7cdddaf..5c9cd25bb79ad 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -26,7 +26,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.107.Final +netty = 4.1.108.Final joda = 2.12.2 # project reactor diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- 
/dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 deleted file mode 100644 index 114d77a1bb95f..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b7070e9acfe262bb0bd936c4051116631796b3b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..e850aad5f3656 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +3ad0af28e408092f0d12994802a9f3fe18d45f8c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5a4bde479eb38..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebc495e9b2bc2c9ab60a264b40f62dc0671d9f6e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d4ae1b7e71661 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +62b6a5dfee2e22ab9015a469cb68e4727596fd4c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..8d299e265646d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 deleted file mode 100644 index 0e3595fecb0d2..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3019703b67413ef3d6150da1f49753f4010507ce \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..5f0eed9c5d7e4 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ad97680373f9c9f278f597ad6552d44e20418929 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 deleted file mode 100644 index 6b9a35acb2c20..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9234407d6a46745599735765c4d3755c7fc84162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..6ed00ff79dea9 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +baf7b939ef71b25713cacbe47bef8caf80ce99c6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 deleted file mode 100644 index beb44fc0f4cf9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8509a72b8a5a2d33d611e99254aed39765c3ad82 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..1021bfbec06ad --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +2a9d06026ed251705e6ab52fa6ebe5f4f15aab7a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 deleted file mode 100644 index 4c74bb06fd83b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a1d32debf2ed07c5852ab5b2904c43adb76c39e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..28bef74acca6d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +c2ef6018eecde345fcddb96e31f651df16dca4c2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index d823de7ffadd4..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -945e8ad5ab7ec4f11fb0257d2594af0cfae1d4b7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..93207338f7db8 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +93cc78652ed836ef950604139bfb4afb45e0bc7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 deleted file mode 100644 index 38eb2e5bad80a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04d8e9e51b7254bd26a42fe17bdcae32e4c6ebb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..82fb94debd45d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +dd44733e94f3f6237c896f2bbe9927c1eba48543 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 deleted file mode 100644 index 5b3d3311edc9f..0000000000000 
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3885ffe7dd05c9773df70c61009f34a5a8a383ec \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..018cf546ca622 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +ed90430e545529a2df7c1db6c94568ea00867a61 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index bbe91c6ccfb1d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f17a547530d64becd7179507b25f4154bcfba57 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..0f459553b16e0 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +30617b39cc6f850ca3807459fe726fbcd63989f2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 deleted file mode 100644 index ba27b38632622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d4c6b05f4d9aca117981297fb7f02953102ebb5e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..854891ce4dafe --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +d186a0be320e6a139c42d9b018596ef9d4a0b4ca \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 deleted file mode 100644 index 3bc0f7b3fed09..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dfee84308341a42131dd0f8ac0e1e02d627c19f3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..3a95ebfdbe6a1 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +f3085568e45c2ca74118118f792d0d55968aeb13 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 deleted file mode 100644 index a62cb0fefcc40..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d655d09e972dee46f580dbcf41c0d1356aea9e1b \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..8d299e265646d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +84d160a3b20f1de896df0cfafe6638199d49efb8 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 deleted file mode 100644 index 19419999300dd..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6a105c621b47d1410e0e09419d7209d2d46e914 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..d1e2ada6f8c84 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +1fd80f714c85ca685a80f32e0a4e8fd3b866e310 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 deleted file mode 100644 index 407ecaffdad30..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.107.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d61d4959741109b3eccd7337f11fc89fa90a74a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 new file mode 100644 index 0000000000000..978378686b4ad --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.108.Final.jar.sha1 @@ -0,0 +1 @@ +0df31f1cd96df8b2882b1e0faf4409b0bd704541 \ No newline at end of file From 10fc755b2df90cb755cf5cb4809392ed904aaa3e Mon Sep 17 00:00:00 2001 From: kkewwei Date: Wed, 27 Mar 2024 02:27:15 +0800 Subject: [PATCH 13/56] Fix unit test to limit thread creation in TaskBatcherTests (#12817) Signed-off-by: kkewwei --- .../org/opensearch/cluster/service/TaskBatcherTests.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java index 070fde523fe87..b0916ce9236f7 100644 --- a/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/TaskBatcherTests.java @@ -218,7 +218,8 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru executors[i] = new TaskExecutor(); } - int tasksSubmittedPerThread = randomIntBetween(2, 1024); + // It can create at most 8192 threads, which can cause a native memory OOM, so we limit the number of threads created.
+ int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -286,7 +287,7 @@ public void testNoTasksAreDroppedInParallelSubmission() throws BrokenBarrierExce executors[i] = new TaskExecutor(); } - int tasksSubmittedPerThread = randomIntBetween(2, 1024); + int tasksSubmittedPerThread = randomIntBetween(2, 128); CopyOnWriteArrayList> failures = new CopyOnWriteArrayList<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); From f2cc3d8ea314b2d49a8b6ea5f57de5b6aff4faf9 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Tue, 26 Mar 2024 13:29:05 -0700 Subject: [PATCH 14/56] Add a counter to node stat (and _cat/shards) api to track shard going from idle to non-idle (#12768) --------- Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../test/cat.shards/10_basic.yml | 99 ++++++++++++++++++- .../index/search/stats/SearchStats.java | 26 ++++- .../index/search/stats/ShardSearchStats.java | 9 +- .../opensearch/index/shard/IndexShard.java | 5 + .../index/shard/SearchOperationListener.java | 16 +++ .../rest/action/cat/RestShardsAction.java | 5 + .../index/search/stats/SearchStatsTests.java | 7 +- .../shard/SearchOperationListenerTests.java | 31 ++++++ .../action/cat/RestShardsActionTests.java | 8 +- 10 files changed, 196 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 03aa8b8ec7f45..b8ab06ce681b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index 29fbf55417961..c309f19b454e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,13 +1,108 @@ "Help": - skip: - version: " - 2.11.99" + version: " - 2.99.99" + reason: search idle reactivate count total is only added in 3.0.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: "3.0.0 - " + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + 
merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.concurrent_query_current .+ \n + search.concurrent_query_time .+ \n + search.concurrent_query_total .+ \n + search.concurrent_avg_slice_count .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n + search.search_idle_reactivate_count_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + docs.deleted .+ \n + $/ +--- +"Help from 2.12.0 to 2.99.99": + - skip: + version: " - 2.11.99 , 3.0.0 - " reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.12.0 - " + version: "2.12.0 - 2.99.99" - match: $body: | diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index 576e00f8f30d1..d3a53fcc0e2d8 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -163,6 +163,8 @@ public static class Stats implements Writeable, ToXContentFragment { private long pitTimeInMillis; private long pitCurrent; + private long searchIdleReactivateCount; + @Nullable private RequestStatsLongHolder requestStatsLongHolder; @@ -193,7 +195,8 @@ public Stats( long pitCurrent, long suggestCount, long suggestTimeInMillis, - long suggestCurrent + long suggestCurrent, + long searchIdleReactivateCount ) { this.requestStatsLongHolder = new RequestStatsLongHolder(); this.queryCount = queryCount; @@ -220,6 +223,8 @@ public Stats( this.pitCount = pitCount; this.pitTimeInMillis = pitTimeInMillis; this.pitCurrent = pitCurrent; + + this.searchIdleReactivateCount = searchIdleReactivateCount; } private Stats(StreamInput in) throws IOException { @@ -255,6 +260,10 @@ private Stats(StreamInput in) throws IOException { concurrentQueryCurrent = in.readVLong(); queryConcurrency = in.readVLong(); } + + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + searchIdleReactivateCount = in.readVLong(); + } } public void add(Stats stats) { @@ -282,6 +291,8 @@ public void add(Stats stats) { pitCount += stats.pitCount; pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public void addForClosingShard(Stats stats) { @@ -306,6 +317,8 @@ public void addForClosingShard(Stats stats) { pitTimeInMillis += stats.pitTimeInMillis; pitCurrent += stats.pitCurrent; queryConcurrency += stats.queryConcurrency; + + searchIdleReactivateCount += stats.searchIdleReactivateCount; } public long getQueryCount() { @@ -412,6 +425,10 @@ public long getSuggestCurrent() { return suggestCurrent; } + public 
long getSearchIdleReactivateCount() { + return searchIdleReactivateCount; + } + public static Stats readStats(StreamInput in) throws IOException { return new Stats(in); } @@ -457,6 +474,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(concurrentQueryCurrent); out.writeVLong(queryConcurrency); } + + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeVLong(searchIdleReactivateCount); + } } @Override @@ -486,6 +507,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); + builder.field(Fields.SEARCH_IDLE_REACTIVATE_COUNT_TOTAL, searchIdleReactivateCount); + if (requestStatsLongHolder != null) { builder.startObject(Fields.REQUEST); @@ -654,6 +677,7 @@ static final class Fields { static final String TIME = "time"; static final String CURRENT = "current"; static final String TOTAL = "total"; + static final String SEARCH_IDLE_REACTIVATE_COUNT_TOTAL = "search_idle_reactivate_count_total"; } diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 99e3f8465c5db..3098986852cc1 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -213,6 +213,11 @@ public void onFreePitContext(ReaderContext readerContext) { totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onSearchIdleReactivation() { + totalStats.searchIdleMetric.inc(); + } + /** * Holder of statistics values * @@ -239,6 +244,7 @@ static final class StatsHolder { final CounterMetric scrollCurrent = new CounterMetric(); final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); + final CounterMetric searchIdleMetric = new CounterMetric(); SearchStats.Stats stats() { return new SearchStats.Stats( @@ -260,7 +266,8 @@ SearchStats.Stats stats() { pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), - suggestCurrent.count() + suggestCurrent.count(), + searchIdleMetric.count() ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 1d7aa6ac4958b..551dd338eed2a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4683,9 +4683,14 @@ public void afterRefresh(boolean didRefresh) { * true if the listener was registered to wait for a refresh. 
*/ public final void awaitShardSearchActive(Consumer listener) { + boolean isSearchIdle = isSearchIdle(); markSearcherAccessed(); // move the shard into non-search idle final Translog.Location location = pendingRefreshLocation.get(); if (location != null) { + if (isSearchIdle) { + SearchOperationListener searchOperationListener = getSearchOperationListener(); + searchOperationListener.onSearchIdleReactivation(); + } addRefreshListener(location, (b) -> { pendingRefreshLocation.compareAndSet(location, null); listener.accept(true); diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index 849a4f9c15318..94079db468f9c 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -145,6 +145,11 @@ default void onNewPitContext(ReaderContext readerContext) {} */ default void onFreePitContext(ReaderContext readerContext) {} + /** + * Executed when a shard goes from idle to non-idle state + */ + default void onSearchIdleReactivation() {} + /** * A Composite listener that multiplexes calls to each of the listeners methods. */ @@ -310,5 +315,16 @@ public void onFreePitContext(ReaderContext readerContext) { } } } + + @Override + public void onSearchIdleReactivation() { + for (SearchOperationListener listener : listeners) { + try { + listener.onSearchIdleReactivation(); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("onSearchIdleReactivation listener [{}] failed", listener), e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 4cd10c6874e0a..4413c8eb370be 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -253,6 +253,10 @@ protected Table getTableWithHeader(final RestRequest request) { "search.point_in_time_total", "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" ); + table.addCell( + "search.search_idle_reactivate_count_total", + "alias:ssirct,searchSearchIdleReactivateCountTotal;default:false;text-align:right;desc:number of times a shard was reactivated" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); @@ -427,6 +431,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getSearchIdleReactivateCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index
5656b77445772..594700ea60b3e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -57,9 +57,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); @@ -128,6 +128,7 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); + assertEquals(equalTo, stats.getSearchIdleReactivateCount()); // avg_concurrency is not summed up across stats assertEquals(1, stats.getConcurrentAvgSliceCount(), 0); } diff --git a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java index 98f86758ea2ca..c61c13eecf2c3 100644 --- a/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SearchOperationListenerTests.java @@ -64,6 +64,7 @@ public void testListenersAreExecuted() { AtomicInteger newScrollContext = new AtomicInteger(); AtomicInteger freeScrollContext = new AtomicInteger(); AtomicInteger validateSearchContext = new AtomicInteger(); + AtomicInteger searchIdleReactivateCount = new AtomicInteger(); AtomicInteger timeInNanos = new AtomicInteger(randomIntBetween(0, 10)); SearchOperationListener listener = new SearchOperationListener() { @Override @@ -133,6 +134,11 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertNotNull(readerContext); validateSearchContext.incrementAndGet(); } + + @Override + public void onSearchIdleReactivation() { + searchIdleReactivateCount.incrementAndGet(); + } }; SearchOperationListener throwingListener = (SearchOperationListener) Proxy.newProxyInstance( @@ -169,6 +175,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFetchPhase(ctx, timeInNanos.get()); @@ -182,6 +189,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); 
compositeListener.onPreQueryPhase(ctx); @@ -195,6 +203,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onPreFetchPhase(ctx); @@ -208,6 +217,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedFetchPhase(ctx); @@ -221,6 +231,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFailedQueryPhase(ctx); @@ -234,6 +245,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewReaderContext(mock(ReaderContext.class)); @@ -247,6 +259,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(0, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onNewScrollContext(mock(ReaderContext.class)); @@ -260,6 +273,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(0, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeReaderContext(mock(ReaderContext.class)); @@ -273,6 +287,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(0, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); compositeListener.onFreeScrollContext(mock(ReaderContext.class)); @@ -286,6 +301,21 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(0, searchIdleReactivateCount.get()); + assertEquals(0, validateSearchContext.get()); + + compositeListener.onSearchIdleReactivation(); + assertEquals(2, preFetch.get()); + assertEquals(2, preQuery.get()); + assertEquals(2, failedFetch.get()); + assertEquals(2, failedQuery.get()); + assertEquals(2, onQuery.get()); + assertEquals(2, onFetch.get()); + assertEquals(2, newContext.get()); + assertEquals(2, newScrollContext.get()); + assertEquals(2, freeContext.get()); + assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(0, validateSearchContext.get()); if (throwingListeners == 0) { @@ -311,6 +341,7 @@ public void 
validateReaderContext(ReaderContext readerContext, TransportRequest assertEquals(2, newScrollContext.get()); assertEquals(2, freeContext.get()); assertEquals(2, freeScrollContext.get()); + assertEquals(2, searchIdleReactivateCount.get()); assertEquals(2, validateSearchContext.get()); } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index fa13ec2036797..883df7da5d717 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -125,7 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); - assertThat(headers.get(78).value, equalTo("docs.deleted")); + assertThat(headers.get(79).value, equalTo("docs.deleted")); final List> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -141,9 +141,9 @@ public void testBuildTable() { assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(76).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(77).value, equalTo(shardStats.getStatePath())); - assertThat(row.get(78).value, equalTo(shardStats.getStats().getDocs().getDeleted())); + assertThat(row.get(77).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(78).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(79).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } From 839ee4c167dae6b9b2352693e16dff7d98f225db Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) 
Doubrovkine" Date: Tue, 26 Mar 2024 18:09:59 -0400 Subject: [PATCH 15/56] Rebased version of #12706 (#12933) * Fix Flaky SettingTests Tests Signed-off-by: kkewwei * add the comment to unit test Signed-off-by: kkewwei --------- Signed-off-by: kkewwei Co-authored-by: kkewwei --- .../java/org/opensearch/common/settings/SettingTests.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index c6da96b521276..7ebee680e8e52 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -201,7 +201,7 @@ public void testMemorySize() { assertEquals(new ByteSizeValue(12), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "20%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2)), value.get()); } public void testMemorySizeWithFallbackValue() { @@ -219,10 +219,12 @@ public void testMemorySizeWithFallbackValue() { assertEquals(memorySizeValue.getBytes(), JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.2, 1.0); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "30%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); + // If value=getHeapMax()*0.3 is bigger than 2gb, and is bigger than Integer.MAX_VALUE, + // then (long)((int) value) will lose precision. + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3)), value.get()); assertTrue(settingUpdater.apply(Settings.builder().put("b.byte.size", "40%").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue((int) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); + assertEquals(new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.4)), value.get()); } public void testSimpleUpdate() { From 618782d8fe8c26298caf912795513b23c33db149 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Tue, 26 Mar 2024 19:34:30 -0700 Subject: [PATCH 16/56] Perform buildAggregation concurrently and support Composite Aggregations (#12697) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../AggregationsIntegrationIT.java | 23 ++++++++++++ .../aggregations/bucket/CompositeAggIT.java | 29 ++++++++------- .../AggregationCollectorManager.java | 14 ++------ .../search/aggregations/Aggregator.java | 20 ++++++++--- .../BucketCollectorProcessor.java | 36 +++++++++++++++++++ .../GlobalAggCollectorManager.java | 15 ++++++++ .../MultiBucketConsumerService.java | 8 ++--- .../NonGlobalAggCollectorManager.java | 15 ++++++++ .../CompositeAggregationFactory.java | 5 +-- .../GlobalOrdinalsStringTermsAggregator.java | 29 ++++++++++++--- .../search/internal/SearchContext.java | 6 ++++ .../AggregationProcessorTests.java | 6 ++++ .../opensearch/test/OpenSearchTestCase.java | 7 +++- 14 files changed, 173 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8ab06ce681b4..cf0a38c081392 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,6 +104,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added - Add a counter to node stat api to 
track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) +- [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java index 6059abce53c8b..4a8b00ea45738 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java @@ -38,6 +38,8 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.search.aggregations.bucket.terms.IncludeExclude; @@ -56,6 +58,8 @@ import java.util.List; import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.global; +import static org.opensearch.search.aggregations.AggregationBuilders.stats; import static org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -164,4 +168,23 @@ private void runLargeStringAggregationTest(AggregationBuilder aggregation) { } assertTrue("Exception should have been thrown", exceptionThrown); } + + public void testAggsOnEmptyShards() { + // Create index with 5 shards but only 1 doc + assertAcked( + prepareCreate( + "idx", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 5).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).setMapping("score", "type=integer") + ); + client().prepareIndex("idx").setId("1").setSource("score", "5").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + // Validate global agg does not throw an exception + assertSearchResponse( + client().prepareSearch("idx").addAggregation(global("global").subAggregation(stats("value_stats").field("score"))).get() + ); + + // Validate non-global agg does not throw an exception + assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get()); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java index 5a38ba670f1dc..a743f22a2ff77 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/CompositeAggIT.java @@ -26,6 +26,7 @@ import java.util.Collection; import java.util.List; +import static org.opensearch.indices.IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING; import static 
org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse; @@ -50,23 +51,25 @@ public void setupSuiteScopeCluster() throws Exception { assertAcked( prepareCreate( "idx", - Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), false) ).setMapping("type", "type=keyword", "num", "type=integer", "score", "type=integer") ); waitForRelocation(ClusterHealthStatus.GREEN); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15").get(); - refresh("idx"); - client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1").get(); - client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100").get(); - refresh("idx"); + indexRandom( + true, + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "5"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "11", "score", "50"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "1", "score", "2"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "12", "score", "20"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "10"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "15"), + client().prepareIndex("idx").setId("1").setSource("type", "type1", "num", "3", "score", "1"), + client().prepareIndex("idx").setId("1").setSource("type", "type2", "num", "13", "score", "100") + ); waitForRelocation(ClusterHealthStatus.GREEN); refresh(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java index 0bb2d1d7ca933..94e9ce5063277 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationCollectorManager.java @@ -15,9 +15,9 @@ import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; -import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.Objects; /** * Common {@link CollectorManager} used by both concurrent and non-concurrent aggregation path and also for global and non-global @@ -56,17 +56,9 @@ public String getCollectorReason() { @Override public ReduceableSearchResult reduce(Collection collectors) throws IOException { - final List aggregators = context.bucketCollectorProcessor().toAggregators(collectors); 
- final List internals = new ArrayList<>(aggregators.size()); + final List internals = context.bucketCollectorProcessor().toInternalAggregations(collectors); + assert internals.stream().noneMatch(Objects::isNull); context.aggregations().resetBucketMultiConsumer(); - for (Aggregator aggregator : aggregators) { - try { - // post collection is called in ContextIndexSearcher after search on leaves are completed - internals.add(aggregator.buildTopLevel()); - } catch (IOException e) { - throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); - } - } final InternalAggregations internalAggregations = InternalAggregations.from(internals); return buildAggregationResult(internalAggregations); diff --git a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java index 8744d1f6a07d3..f4db8f61bf537 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/Aggregator.java @@ -33,6 +33,7 @@ package org.opensearch.search.aggregations; import org.opensearch.OpenSearchParseException; +import org.opensearch.common.SetOnce; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.lease.Releasable; import org.opensearch.core.ParseField; @@ -61,6 +62,8 @@ @PublicApi(since = "1.0.0") public abstract class Aggregator extends BucketCollector implements Releasable { + private final SetOnce internalAggregation = new SetOnce<>(); + /** * Parses the aggregation request and creates the appropriate aggregator factory for it. * @@ -83,6 +86,13 @@ public interface Parser { AggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException; } + /** + * Returns the InternalAggregation stored during post collection + */ + public InternalAggregation getPostCollectionAggregation() { + return internalAggregation.get(); + } + /** * Return the name of this aggregator. */ @@ -185,13 +195,15 @@ public interface BucketComparator { /** * Build the result of this aggregation if it is at the "top level" - * of the aggregation tree. If, instead, it is a sub-aggregation of - * another aggregation then the aggregation that contains it will call - * {@link #buildAggregations(long[])}. + * of the aggregation tree and save it. This should get called + * during post collection. If, instead, it is a sub-aggregation + * of another aggregation then the aggregation that contains + * it will call {@link #buildAggregations(long[])}. 
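+ * For example (illustrative only): {@code BucketCollectorProcessor#processPostCollection} invokes + * {@code postCollection()} followed by {@code buildTopLevel()} on each aggregator in the collector + * tree, and the stored result is later retrieved through {@link #getPostCollectionAggregation()} + * during reduce.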
*/ public final InternalAggregation buildTopLevel() throws IOException { assert parent() == null; - return buildAggregations(new long[] { 0 })[0]; + this.internalAggregation.set(buildAggregations(new long[] { 0 })[0]); + return internalAggregation.get(); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java index 135fda71a757a..df05ce3f5c049 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java +++ b/server/src/main/java/org/opensearch/search/aggregations/BucketCollectorProcessor.java @@ -72,6 +72,15 @@ public void processPostCollection(Collector collectorTree) throws IOException { } } else if (currentCollector instanceof BucketCollector) { ((BucketCollector) currentCollector).postCollection(); + + // Perform build aggregation during post collection + if (currentCollector instanceof Aggregator) { + ((Aggregator) currentCollector).buildTopLevel(); + } else if (currentCollector instanceof MultiBucketCollector) { + for (Collector innerCollector : ((MultiBucketCollector) currentCollector).getCollectors()) { + collectors.offer(innerCollector); + } + } } } } @@ -106,4 +115,31 @@ public List toAggregators(Collection collectors) { } return aggregators; } + + /** + * Unwraps the input collection of {@link Collector} to get the list of the {@link InternalAggregation}. The + * input is expected to contain the collectors related to Aggregations only as that is passed to {@link AggregationCollectorManager} + * during the reduce phase. This list of {@link InternalAggregation} is used to optionally perform reduce at shard level before + * returning response to coordinator + * @param collectors collection of aggregation collectors to reduce + * @return list of unwrapped {@link InternalAggregation} + */ + public List toInternalAggregations(Collection collectors) throws IOException { + List internalAggregations = new ArrayList<>(); + + final Deque allCollectors = new LinkedList<>(collectors); + while (!allCollectors.isEmpty()) { + Collector currentCollector = allCollectors.pop(); + if (currentCollector instanceof InternalProfileCollector) { + currentCollector = ((InternalProfileCollector) currentCollector).getCollector(); + } + + if (currentCollector instanceof Aggregator) { + internalAggregations.add(((Aggregator) currentCollector).getPostCollectionAggregation()); + } else if (currentCollector instanceof MultiBucketCollector) { + allCollectors.addAll(Arrays.asList(((MultiBucketCollector) currentCollector).getCollectors())); + } + } + return internalAggregations; + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java index 8814cc3c435e1..db2a11b47fbc3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/GlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public 
ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. + if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java index 1461dd3009b44..35186422fceaa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java @@ -131,12 +131,10 @@ public static class MultiBucketConsumer implements IntConsumer { private final int limit; private final CircuitBreaker breaker; - // aggregations execute in a single thread for both sequential - // and concurrent search, so no atomic here + // count is currently only updated in final reduce phase which is executed in single thread for both concurrent and non-concurrent + // search private int count; - - // will be updated by multiple threads in concurrent search - // hence making it as LongAdder + // will be updated by multiple threads in concurrent search hence making it as LongAdder private final LongAdder callCount; private volatile boolean circuitBreakerTripped; private final int availProcessors; diff --git a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java index 8b0a1530b5505..5408a19c8ca50 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java +++ b/server/src/main/java/org/opensearch/search/aggregations/NonGlobalAggCollectorManager.java @@ -12,8 +12,10 @@ import org.apache.lucene.search.CollectorManager; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.profile.query.CollectorResult; +import org.opensearch.search.query.ReduceableSearchResult; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Objects; @@ -42,6 +44,19 @@ public Collector newCollector() throws IOException { } } + @Override + public ReduceableSearchResult reduce(Collection collectors) throws IOException { + // If there are no leaves then in concurrent search case postCollection, and subsequently buildAggregation, will not be called in + // search path. Since we build the InternalAggregation in postCollection that will not get created in such cases either. Therefore + // we need to manually processPostCollection here to build empty InternalAggregation objects for this collector tree. 
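+ // (processPostCollection invokes postCollection() and then buildTopLevel() on each aggregator, + // which stores an empty InternalAggregation since no documents were collected)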
+ if (context.searcher().getLeafContexts().isEmpty()) { + for (Collector c : collectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } + } + return super.reduce(collectors); + } + @Override protected AggregationReduceableSearchResult buildAggregationResult(InternalAggregations internalAggregations) { // Reduce the aggregations across slices before sending to the coordinator. We will perform shard level reduce as long as any slices diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java index 4af14ab014db5..6c5619a843fae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregationFactory.java @@ -40,6 +40,7 @@ import org.opensearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Arrays; import java.util.Map; /** @@ -80,7 +81,7 @@ protected Aggregator createInternal( @Override protected boolean supportsConcurrentSegmentSearch() { - // See https://github.com/opensearch-project/OpenSearch/issues/12331 for details - return false; + // Disable concurrent search if any scripting is used. See https://github.com/opensearch-project/OpenSearch/issues/12331 for details + return Arrays.stream(sources).noneMatch(CompositeValuesSourceConfig::hasScript); } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 69fda2f3f6133..9e40f7b4c9b3e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -45,6 +45,7 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; +import org.opensearch.common.SetOnce; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; import org.opensearch.common.util.LongArray; @@ -94,8 +95,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr private final long valueCount; private final String fieldName; private Weight weight; - private final GlobalOrdLookupFunction lookupGlobalOrd; protected final CollectionStrategy collectionStrategy; + private final SetOnce dvs = new SetOnce<>(); protected int segmentsWithSingleValuedOrds = 0; protected int segmentsWithMultiValuedOrds = 0; @@ -129,11 +130,10 @@ public GlobalOrdinalsStringTermsAggregator( this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. this.valuesSource = valuesSource; final IndexReader reader = context.searcher().getIndexReader(); - final SortedSetDocValues values = reader.leaves().size() > 0 + final SortedSetDocValues values = !reader.leaves().isEmpty() ? valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) : DocValues.emptySortedSet(); this.valueCount = values.getValueCount(); - this.lookupGlobalOrd = values::lookupOrd; this.acceptedGlobalOrdinals = includeExclude == null ? 
ALWAYS_TRUE : includeExclude.acceptedGlobalOrdinals(values)::get; if (remapGlobalOrds) { this.collectionStrategy = new RemapGlobalOrds(cardinality); @@ -885,7 +885,10 @@ PriorityQueue buildPriorityQueue(int size) { } StringTerms.Bucket convertTempBucketToRealBucket(OrdBucket temp) throws IOException { - BytesRef term = BytesRef.deepCopyOf(lookupGlobalOrd.apply(temp.globalOrd)); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + BytesRef term = BytesRef.deepCopyOf(values.lookupOrd(temp.globalOrd)); + StringTerms.Bucket result = new StringTerms.Bucket(term, temp.docCount, null, showTermDocCountError, 0, format); result.bucketOrd = temp.bucketOrd; result.docCountError = 0; @@ -1001,7 +1004,9 @@ BucketUpdater bucketUpdater(long owningBucketOrd) long subsetSize = subsetSize(owningBucketOrd); return (spare, globalOrd, bucketOrd, docCount) -> { spare.bucketOrd = bucketOrd; - oversizedCopy(lookupGlobalOrd.apply(globalOrd), spare.termBytes); + // Recreate DocValues as needed for concurrent segment search + SortedSetDocValues values = getDocValues(); + oversizedCopy(values.lookupOrd(globalOrd), spare.termBytes); spare.subsetDf = docCount; spare.subsetSize = subsetSize; spare.supersetDf = backgroundFrequencies.freq(spare.termBytes); @@ -1086,4 +1091,18 @@ private void oversizedCopy(BytesRef from, BytesRef to) { * Predicate used for {@link #acceptedGlobalOrdinals} if there is no filter. */ private static final LongPredicate ALWAYS_TRUE = l -> true; + + /** + * If DocValues have not been initialized yet for reduce phase, create and set them. + */ + private SortedSetDocValues getDocValues() throws IOException { + if (dvs.get() == null) { + dvs.set( + !context.searcher().getIndexReader().leaves().isEmpty() + ? 
valuesSource.globalOrdinalsValues(context.searcher().getIndexReader().leaves().get(0)) + : DocValues.emptySortedSet() + ); + } + return dvs.get(); + } } diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 3d13378e58e5d..8f6001dc06755 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -113,6 +113,12 @@ public List toAggregators(Collection collectors) { // should not be called when there is no aggregation collector throw new IllegalStateException("Unexpected toAggregators call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); } + + @Override + public List toInternalAggregations(Collection collectors) { + // should not be called when there is no aggregation collector + throw new IllegalStateException("Unexpected toInternalAggregations call on NO_OP_BUCKET_COLLECTOR_PROCESSOR"); + } }; private final List releasables = new CopyOnWriteArrayList<>(); diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java index d68b0911d3d01..c10233a72921f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java @@ -187,10 +187,16 @@ protected LeafSlice[] slices(List leaves) { AggregationCollectorManager collectorManager; if (expectedNonGlobalAggsPerSlice > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(NonGlobalAggCollectorManager.class); + for (Collector c : nonGlobalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } collectorManager.reduce(nonGlobalCollectors).reduce(context.queryResult()); } if (expectedGlobalAggs > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(GlobalAggCollectorManager.class); + for (Collector c : globalCollectors) { + context.bucketCollectorProcessor().processPostCollection(c); + } ReduceableSearchResult result = collectorManager.reduce(globalCollectors); doReturn(result).when(testSearcher) .search(nullable(Query.class), ArgumentMatchers.>any()); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index aac3fca9e1e16..f381ebdb64fc2 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -211,7 +211,12 @@ "LuceneFixedGap", "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", - "Lucene50" }) + "Lucene50", + "Lucene90", + "Lucene94", + "Lucene90", + "Lucene95", + "Lucene99" }) @LuceneTestCase.SuppressReproduceLine public abstract class OpenSearchTestCase extends LuceneTestCase { From 8d5a1d2ece8c4b2cd93d3fd9bab8e6c3359a03d1 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 27 Mar 2024 19:53:07 +0800 Subject: [PATCH 17/56] Convert ingest processor supports ip type (#12818) * Convert ingest processor supports ip type Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong * Add comment Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/ConvertProcessor.java | 14 ++++ 
.../ingest/common/ConvertProcessorTests.java | 25 ++++++ .../test/ingest/330_convert_processor.yml | 83 +++++++++++++++++++ 4 files changed, 123 insertions(+) create mode 100644 modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index cf0a38c081392..f12f4f33acb5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java index 2a81fa5f4986e..c7b5a8978188f 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ConvertProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.network.InetAddresses; import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.ingest.IngestDocument; @@ -118,6 +119,19 @@ public Object convert(Object value) { return value.toString(); } }, + IP { + @Override + public Object convert(Object value) { + // If the value is a valid ipv4/ipv6 address, we return the original value directly because IpFieldType + // can accept string value, this is simpler than we return an InetAddress object which needs to do more + // work such as serialization + if (value instanceof String && InetAddresses.isInetAddress(value.toString())) { + return value; + } else { + throw new IllegalArgumentException("[" + value + "] is not a valid ipv4/ipv6 address"); + } + } + }, AUTO { @Override public Object convert(Object value) { diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java index 0ba0a39261d00..50ece9282888f 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ConvertProcessorTests.java @@ -550,4 +550,29 @@ public void testTargetField() throws Exception { assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(String.valueOf(randomInt))); assertThat(ingestDocument.getFieldValue(targetField, Integer.class), equalTo(randomInt)); } + + public void testConvertIP() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String validIPString; + if (randomBoolean()) { + validIPString = "1.2.3.4"; + } else { + validIPString = "::1"; + } + String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, validIPString); + + Processor processor = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + processor.execute(ingestDocument); + 
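+ // a valid IPv4/IPv6 string should be returned unchanged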
assertThat(ingestDocument.getFieldValue(fieldName, String.class), equalTo(validIPString)); + + String invalidIPString = randomAlphaOfLength(10); + fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, invalidIPString); + Processor processorWithInvalidIP = new ConvertProcessor(randomAlphaOfLength(10), null, fieldName, fieldName, Type.IP, false); + try { + processorWithInvalidIP.execute(ingestDocument); + fail("processor execute should have failed"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("[" + invalidIPString + "] is not a valid ipv4/ipv6 address")); + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml new file mode 100644 index 0000000000000..994ed225dd624 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/330_convert_processor.yml @@ -0,0 +1,83 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test convert processor with ip type": + - skip: + version: " - 2.13.99" + reason: "introduced in 2.14.0" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "type": "ip" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /\[1.1.1.\] is not a valid ipv4\/ipv6 address/ + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "convert" : { + "field" : "raw_ip", + "target_field" : "ip_field", + "type" : "ip", + "ignore_failure" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1." 
+ } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1."} } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + raw_ip: "1.1.1.1" + } + - do: + get: + index: test + id: 1 + - match: { _source: { raw_ip: "1.1.1.1", ip_field: "1.1.1.1"} } From 1d1d37010e8e1ef70aab87d125de89002a500562 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 27 Mar 2024 20:55:31 +0800 Subject: [PATCH 18/56] Update supported version for the bug fix which add more index blocks check for resize APIs (#12921) Signed-off-by: Gao Binlong --- .../resources/rest-api-spec/test/indices.clone/10_basic.yml | 6 +++--- .../rest-api-spec/test/indices.shrink/10_basic.yml | 6 +++--- .../resources/rest-api-spec/test/indices.split/10_basic.yml | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml index fa48820a71a89..07df09225c624 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.clone/10_basic.yml @@ -113,12 +113,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 426729e737978..67b5be7eb0fd5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -72,12 +72,12 @@ setup: - match: { _id: "1" } - match: { _source: { foo: "hello world" } } +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 50c2819eac9d5..096a61a765288 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -219,12 +219,12 @@ setup: index.number_of_replicas: 0 index.number_of_shards: 6 +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/4845 --- "Returns error if target index's metadata write is blocked": - - skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.7.99" + reason: "the bug was fixed in 2.8.0" # block source index's write operations - do: From 5db84d16a96932e4530ff1303bdd6edde52f4caf Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Wed, 27 Mar 2024 12:11:53 -0400 Subject: [PATCH 19/56] Allow setting KEYSTORE_PASSWORD through env variable (#12865) --- CHANGELOG.md | 1 + distribution/src/bin/opensearch | 12 +++++++----- distribution/src/bin/opensearch.bat | 13 ++++++++----- 3 files changed, 
16 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f12f4f33acb5e..8a18660a2d84c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -105,6 +105,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Added - Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) +- Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) ### Dependencies diff --git a/distribution/src/bin/opensearch b/distribution/src/bin/opensearch index 947d1167f79f2..8a3b0a009437f 100755 --- a/distribution/src/bin/opensearch +++ b/distribution/src/bin/opensearch @@ -36,14 +36,16 @@ fi # get keystore password before setting java options to avoid # conflicting GC configurations for the keystore tools -unset KEYSTORE_PASSWORD -KEYSTORE_PASSWORD= if [[ $CHECK_KEYSTORE = true ]] \ && bin/opensearch-keystore has-passwd --silent then - if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then - echo "Failed to read keystore password on console" 1>&2 - exit 1 + if [[ ! -z "${KEYSTORE_PASSWORD}" ]]; then + echo "Using value of KEYSTORE_PASSWORD from the environment" + else + if ! read -s -r -p "OpenSearch keystore password: " KEYSTORE_PASSWORD ; then + echo "Failed to read keystore password on console" 1>&2 + exit 1 + fi fi fi diff --git a/distribution/src/bin/opensearch.bat b/distribution/src/bin/opensearch.bat index cce21504c55b7..b7ecab24165fa 100644 --- a/distribution/src/bin/opensearch.bat +++ b/distribution/src/bin/opensearch.bat @@ -62,14 +62,17 @@ if not exist "%SERVICE_LOG_DIR%" ( mkdir "%SERVICE_LOG_DIR%" ) -SET KEYSTORE_PASSWORD= IF "%checkpassword%"=="Y" ( CALL "%~dp0opensearch-keystore.bat" has-passwd --silent IF !ERRORLEVEL! EQU 0 ( - SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: - IF !ERRORLEVEL! NEQ 0 ( - ECHO Failed to read keystore password on standard input - EXIT /B !ERRORLEVEL! + if defined KEYSTORE_PASSWORD ( + ECHO Using value of KEYSTORE_PASSWORD from the environment + ) else ( + SET /P KEYSTORE_PASSWORD=OpenSearch keystore password: + IF !ERRORLEVEL! NEQ 0 ( + ECHO Failed to read keystore password on standard input + EXIT /B !ERRORLEVEL! + ) ) ) ) From 6ddbdcdc89507c95ca320b4112075cdf764e5d60 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Wed, 27 Mar 2024 15:50:30 -0700 Subject: [PATCH 20/56] [DerivedFields] PR2 - Implementation for all supported types and DerivedFieldType (#12808) Implementation for all supported types and DerivedFieldType. 
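As an illustration of the emit contract (hypothetical script snippets, shown only to make the contract concrete): a geo_point derived field script is expected to emit a latitude/longitude pair, e.g. emit(41.12, -71.34), matching the enforced emit(double lat, double lon) shape, while a keyword derived field simply emits a String value.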
We support the following types so far: boolean date geo_point ip keyword long double --------- Signed-off-by: Rishabh Maurya --- .../mapper/DerivedFieldSupportedTypes.java | 156 ++++++++ .../index/mapper/DerivedFieldType.java | 363 ++++++++++++++++++ .../index/mapper/KeywordFieldMapper.java | 2 +- .../index/mapper/DerivedFieldTypeTests.java | 94 +++++ 4 files changed, 614 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java new file mode 100644 index 0000000000000..10b5c4a0f7157 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldSupportedTypes.java @@ -0,0 +1,156 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.opensearch.Version; +import org.opensearch.common.Booleans; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.network.InetAddresses; + +import java.net.InetAddress; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Contains logic to get the FieldMapper for a given type of derived field. Also, for a given type of derived field, + * it is used to create an IndexableField for the provided type and object. It is useful when indexing into + * lucene MemoryIndex in {@link org.opensearch.index.query.DerivedFieldQuery}. + */ +enum DerivedFieldSupportedTypes { + + BOOLEAN("boolean", (name, context) -> { + BooleanFieldMapper.Builder builder = new BooleanFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // Trying to mimic the logic for parsing source value as used in BooleanFieldMapper valueFetcher + Boolean value; + if (o instanceof Boolean) { + value = (Boolean) o; + } else { + String textValue = o.toString(); + value = Booleans.parseBooleanStrict(textValue, false); + } + return new Field(name, value ? "T" : "F", BooleanFieldMapper.Defaults.FIELD_TYPE); + }), + DATE("date", (name, context) -> { + // TODO: should we support mapping settings exposed by a given field type from derived fields too? + // for example, support `format` for date type? 
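+ // (a hypothetical example: accepting "format": "strict_date_optional_time" in the derived field definition)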
+ DateFieldMapper.Builder builder = new DateFieldMapper.Builder( + name, + DateFieldMapper.Resolution.MILLISECONDS, + DateFieldMapper.getDefaultDateTimeFormatter(), + false, + Version.CURRENT + ); + return builder.build(context); + }, name -> o -> new LongPoint(name, (long) o)), + GEO_POINT("geo_point", (name, context) -> { + GeoPointFieldMapper.Builder builder = new GeoPointFieldMapper.Builder(name); + return builder.build(context); + }, name -> o -> { + // convert o to array of double + if (!(o instanceof List) || ((List) o).size() != 2 || !(((List) o).get(0) instanceof Double)) { + throw new ClassCastException("geo_point should be in format emit(double lat, double lon) for derived fields"); + } + return new LatLonPoint(name, (Double) ((List) o).get(0), (Double) ((List) o).get(1)); + }), + IP("ip", (name, context) -> { + IpFieldMapper.Builder builder = new IpFieldMapper.Builder(name, false, Version.CURRENT); + return builder.build(context); + }, name -> o -> { + InetAddress address; + if (o instanceof InetAddress) { + address = (InetAddress) o; + } else { + address = InetAddresses.forString(o.toString()); + } + return new InetAddressPoint(name, address); + }), + KEYWORD("keyword", (name, context) -> { + FieldType dummyFieldType = new FieldType(); + dummyFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + KeywordFieldMapper.Builder keywordBuilder = new KeywordFieldMapper.Builder(name); + KeywordFieldMapper.KeywordFieldType keywordFieldType = keywordBuilder.buildFieldType(context, dummyFieldType); + keywordFieldType.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + return new KeywordFieldMapper( + name, + dummyFieldType, + keywordFieldType, + keywordBuilder.multiFieldsBuilder.build(keywordBuilder, context), + keywordBuilder.copyTo.build(), + keywordBuilder + ); + }, name -> o -> new KeywordField(name, (String) o, Field.Store.NO)), + LONG("long", (name, context) -> { + NumberFieldMapper.Builder longBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.LONG, false, false); + return longBuilder.build(context); + }, name -> o -> new LongField(name, Long.parseLong(o.toString()), Field.Store.NO)), + DOUBLE("double", (name, context) -> { + NumberFieldMapper.Builder doubleBuilder = new NumberFieldMapper.Builder(name, NumberFieldMapper.NumberType.DOUBLE, false, false); + return doubleBuilder.build(context); + }, name -> o -> new DoubleField(name, Double.parseDouble(o.toString()), Field.Store.NO)); + + final String name; + private final BiFunction builder; + + private final Function> indexableFieldBuilder; + + DerivedFieldSupportedTypes( + String name, + BiFunction builder, + Function> indexableFieldBuilder + ) { + this.name = name; + this.builder = builder; + this.indexableFieldBuilder = indexableFieldBuilder; + } + + public String getName() { + return name; + } + + private FieldMapper getFieldMapper(String name, Mapper.BuilderContext context) { + return builder.apply(name, context); + } + + private Function getIndexableFieldGenerator(String name) { + return indexableFieldBuilder.apply(name); + } + + private static final Map enumMap = Arrays.stream(DerivedFieldSupportedTypes.values()) + .collect(Collectors.toMap(DerivedFieldSupportedTypes::getName, enumValue -> enumValue)); + + public static FieldMapper getFieldMapperFromType(String type, String name, Mapper.BuilderContext context) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getFieldMapper(name, 
context); + } + + public static Function getIndexableFieldGeneratorType(String type, String name) { + if (!enumMap.containsKey(type)) { + throw new IllegalArgumentException("Type [" + type + "] isn't supported in Derived field context."); + } + return enumMap.get(type).getIndexableFieldGenerator(name); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java new file mode 100644 index 0000000000000..abdca7879cc94 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldType.java @@ -0,0 +1,363 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.queries.spans.SpanQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; +import org.opensearch.common.Nullable; +import org.opensearch.common.geo.ShapeRelation; +import org.opensearch.common.time.DateMathParser; +import org.opensearch.common.unit.Fuzziness; +import org.opensearch.index.query.DerivedFieldQuery; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.script.DerivedFieldScript; +import org.opensearch.script.Script; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Function; + +/** + * MappedFieldType for Derived Fields + * Contains logic to execute different type of queries on a derived field of given type. 
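+ * Every query method below follows the same pattern: it first builds the concrete query through the + * delegate {@code typeFieldMapper}'s field type and then wraps it in a {@code DerivedFieldQuery} + * together with a {@code DerivedFieldValueFetcher}, so that the query is evaluated against the + * values produced by the derived field's script.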
+ * @opensearch.internal + */ +public final class DerivedFieldType extends MappedFieldType { + private final String type; + + private final Script script; + + FieldMapper typeFieldMapper; + + final Function indexableFieldGenerator; + + public DerivedFieldType( + String name, + String type, + Script script, + boolean isIndexed, + boolean isStored, + boolean hasDocValues, + Map meta, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + super(name, isIndexed, isStored, hasDocValues, typeFieldMapper.fieldType().getTextSearchInfo(), meta); + this.type = type; + this.script = script; + this.typeFieldMapper = typeFieldMapper; + this.indexableFieldGenerator = fieldFunction; + } + + public DerivedFieldType( + String name, + String type, + Script script, + FieldMapper typeFieldMapper, + Function fieldFunction + ) { + this(name, type, script, false, false, false, Collections.emptyMap(), typeFieldMapper, fieldFunction); + } + + @Override + public String typeName() { + return "derived"; + } + + public String getType() { + return type; + } + + @Override + public DerivedFieldValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats."); + } + return new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQuery(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termQueryCaseInsensitive(Object value, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termQueryCaseInsensitive(value, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query termsQuery(List values, @Nullable QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.termsQuery(values, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query rangeQuery( + Object lowerTerm, + Object upperTerm, + boolean includeLower, + boolean includeUpper, + ShapeRelation relation, + ZoneId timeZone, + DateMathParser parser, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.rangeQuery( + lowerTerm, + upperTerm, + includeLower, + includeUpper, + relation, + timeZone, + parser, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean 
transpositions, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery(value, fuzziness, prefixLength, maxExpansions, transpositions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query fuzzyQuery( + Object value, + Fuzziness fuzziness, + int prefixLength, + int maxExpansions, + boolean transpositions, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.fuzzyQuery( + value, + fuzziness, + prefixLength, + maxExpansions, + transpositions, + method, + context + ); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query prefixQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.prefixQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.wildcardQuery(value, method, caseInsensitive, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.normalizedWildcardQuery(value, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query regexpQuery( + String value, + int syntaxFlags, + int matchFlags, + int maxDeterminizedStates, + @Nullable MultiTermQuery.RewriteMethod method, + QueryShardContext context + ) { + Query query = typeFieldMapper.mappedFieldType.regexpQuery(value, syntaxFlags, matchFlags, maxDeterminizedStates, method, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phraseQuery(stream, slop, 
enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + Query query = typeFieldMapper.mappedFieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query query = typeFieldMapper.mappedFieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + throw new IllegalArgumentException( + "Can only use span prefix queries on text fields - not on [" + name() + "] which is of type [" + typeName() + "]" + ); + } + + @Override + public Query distanceFeatureQuery(Object origin, String pivot, float boost, QueryShardContext context) { + Query query = typeFieldMapper.mappedFieldType.distanceFeatureQuery(origin, pivot, boost, context); + DerivedFieldValueFetcher valueFetcher = new DerivedFieldValueFetcher(getDerivedFieldLeafFactory(context)); + return new DerivedFieldQuery( + query, + valueFetcher, + context.lookup(), + indexableFieldGenerator, + typeFieldMapper.mappedFieldType.indexAnalyzer() + ); + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support exist queries"); + } + + @Override + public boolean isAggregatable() { + return false; + } + + private DerivedFieldScript.LeafFactory getDerivedFieldLeafFactory(QueryShardContext context) { + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "DerivedFieldQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + DerivedFieldScript.Factory factory = context.compile(script, DerivedFieldScript.CONTEXT); + return factory.newFactory(script.getParams(), context.lookup()); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index c14b2c92c89c3..42b974734e5e7 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -218,7 +218,7 @@ protected List> getParameters() { ); } - private KeywordFieldType buildFieldType(BuilderContext context, FieldType fieldType) { + protected KeywordFieldType buildFieldType(BuilderContext 
context, FieldType fieldType) { NamedAnalyzer normalizer = Lucene.KEYWORD_ANALYZER; NamedAnalyzer searchAnalyzer = Lucene.KEYWORD_ANALYZER; String normalizerName = this.normalizer.getValue(); diff --git a/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java new file mode 100644 index 0000000000000..72fb7c88cc478 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/DerivedFieldTypeTests.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.document.DoubleField; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.InetAddressPoint; +import org.apache.lucene.document.KeywordField; +import org.apache.lucene.document.LatLonPoint; +import org.apache.lucene.document.LongField; +import org.apache.lucene.document.LongPoint; +import org.opensearch.script.Script; + +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DerivedFieldTypeTests extends FieldTypeTestCase { + + private DerivedFieldType createDerivedFieldType(String type) { + Mapper.BuilderContext context = mock(Mapper.BuilderContext.class); + when(context.path()).thenReturn(new ContentPath()); + return new DerivedFieldType( + type + " _derived_field", + type, + new Script(""), + DerivedFieldSupportedTypes.getFieldMapperFromType(type, type + "_derived_field", context), + DerivedFieldSupportedTypes.getIndexableFieldGeneratorType(type, type + "_derived_field") + ); + } + + public void testBooleanType() { + DerivedFieldType dft = createDerivedFieldType("boolean"); + assertTrue(dft.typeFieldMapper instanceof BooleanFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(true) instanceof Field); + assertTrue(dft.indexableFieldGenerator.apply(false) instanceof Field); + } + + public void testDateType() { + DerivedFieldType dft = createDerivedFieldType("date"); + assertTrue(dft.typeFieldMapper instanceof DateFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(System.currentTimeMillis()) instanceof LongPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testGeoPointType() { + DerivedFieldType dft = createDerivedFieldType("geo_point"); + assertTrue(dft.typeFieldMapper instanceof GeoPointFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(List.of(10.0, 20.0)) instanceof LatLonPoint); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of())); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of("10"))); + expectThrows(ClassCastException.class, () -> dft.indexableFieldGenerator.apply(List.of(10.0, 20.0, 30.0))); + } + + public void testIPType() { + DerivedFieldType dft = createDerivedFieldType("ip"); + assertTrue(dft.typeFieldMapper instanceof IpFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("127.0.0.1") instanceof InetAddressPoint); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("blah")); + } + + public void testKeywordType() { + DerivedFieldType dft = createDerivedFieldType("keyword"); + assertTrue(dft.typeFieldMapper 
instanceof KeywordFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply("test_keyword") instanceof KeywordField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10)); + } + + public void testLongType() { + DerivedFieldType dft = createDerivedFieldType("long"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10) instanceof LongField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply(10.0)); + } + + public void testDoubleType() { + DerivedFieldType dft = createDerivedFieldType("double"); + assertTrue(dft.typeFieldMapper instanceof NumberFieldMapper); + assertTrue(dft.indexableFieldGenerator.apply(10.0) instanceof DoubleField); + expectThrows(Exception.class, () -> dft.indexableFieldGenerator.apply("")); + } + + public void testUnsupportedType() { + expectThrows(IllegalArgumentException.class, () -> createDerivedFieldType("match_only_text")); + } +} From 8ad0dc09d3e852d74cd7eac65882ea7674a282d9 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Thu, 28 Mar 2024 08:44:27 +0530 Subject: [PATCH 21/56] [Remote Store] Add RemoteStoreSettings class to handle remote store related settings (#12838) * Add RemoteStoreSettings class to handle remote store related settings --------- Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../opensearch/remotestore/RemoteStoreIT.java | 11 +-- .../common/settings/ClusterSettings.java | 8 +- .../org/opensearch/index/IndexModule.java | 9 +- .../org/opensearch/index/IndexService.java | 11 +-- .../opensearch/index/shard/IndexShard.java | 11 ++- .../shard/RemoteStoreRefreshListener.java | 2 +- .../indices/DefaultRemoteStoreSettings.java | 26 ++++++ .../opensearch/indices/IndicesService.java | 35 ++------ .../indices/RemoteStoreSettings.java | 90 +++++++++++++++++++ .../indices/recovery/RecoverySettings.java | 32 ------- .../main/java/org/opensearch/node/Node.java | 6 +- .../opensearch/index/IndexModuleTests.java | 5 +- .../RemoteStoreRefreshListenerTests.java | 10 +-- .../indices/IndicesServiceTests.java | 5 +- ...RemoteStoreSettingsDynamicUpdateTests.java | 69 ++++++++++++++ .../RecoverySettingsDynamicUpdateTests.java | 45 ---------- .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 3 +- 19 files changed, 249 insertions(+), 136 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java create mode 100644 server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java create mode 100644 server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 7e0c1630a76e4..d218f0a985cf3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -84,6 +84,7 @@ import org.opensearch.index.translog.TestTranslog; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; @@ -711,9 +712,9 @@ public 
static final IndexShard newIndexShard( SegmentReplicationCheckpointPublisher.EMPTY, null, null, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, nodeId, null, + DefaultRemoteStoreSettings.INSTANCE, false ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index e1997fea3433a..46e5b7aa28318 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -31,6 +31,7 @@ import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.translog.Translog.Durability; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; @@ -56,7 +57,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.comparesEqualTo; @@ -189,7 +190,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME); - int lastNMetadataFilesToKeep = indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles(); + int lastNMetadataFilesToKeep = indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles(); // Delete is async. 
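The "Delete is async." comment above is load bearing: deleteStaleSegmentsAsync (rewired to RemoteStoreSettings later in this patch) schedules cleanup on a background thread, so the test has to poll rather than assert once, which is what the assertBusy call that follows does. Its general form, as a sketch (expectedFileCount is a hypothetical placeholder; the real test's bounds are elided in this hunk):

    // Sketch of the polling idiom only; the real expected count and timeout may differ.
    assertBusy(() -> assertEquals(expectedFileCount, getFileCount(indexPath)), 30, TimeUnit.SECONDS);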
assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -224,7 +225,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "3"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -243,7 +244,7 @@ public void testStaleCommitDeletionWithMinSegmentFiles_3() throws Exception { public void testStaleCommitDeletionWithMinSegmentFiles_Disabled() throws Exception { Settings.Builder settings = Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), "-1"); internalCluster().startNode(settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); @@ -469,7 +470,7 @@ public void testAsyncDurabilityThrowsExceptionWhenRestrictSettingTrue() throws E private void assertClusterRemoteBufferInterval(TimeValue expectedBufferInterval, String dataNode) { IndicesService indicesService = internalCluster().getInstance(IndicesService.class, dataNode); - assertEquals(expectedBufferInterval, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals(expectedBufferInterval, indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval()); } private void assertBufferInterval(TimeValue expectedBufferInterval, IndexShard indexShard) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 4f1815de224db..8760ee3c94309 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -117,6 +117,7 @@ import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.IndicesRequestCache; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.analysis.HunspellService; import org.opensearch.indices.breaker.BreakerSettings; @@ -297,7 +298,6 @@ public void apply(Settings value, Settings current, Settings previous) { RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_OPERATIONS_SETTING, RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_REMOTE_STORE_STREAMS_SETTING, RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, - RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING, ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING, @@ -706,7 +706,6 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteClusterStateService.METADATA_MANIFEST_UPLOAD_TIMEOUT_SETTING, RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING, RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, 
IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING, IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING, IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, @@ -723,7 +722,10 @@ public void apply(Settings value, Settings current, Settings previous) { // Concurrent segment search settings SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, - SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING + SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING, + + RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 6ac10a221d49e..3c4cb4fd596c1 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -79,6 +79,7 @@ import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.translog.TranslogFactory; import org.opensearch.indices.IndicesQueryCache; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoverySettings; @@ -604,8 +605,8 @@ public IndexService newIndexService( IndexStorePlugin.DirectoryFactory remoteDirectoryFactory, BiFunction translogFactorySupplier, Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) throws IOException { final IndexEventListener eventListener = freeze(); Function> readerWrapperFactory = indexReaderWrapper @@ -663,8 +664,8 @@ public IndexService newIndexService( recoveryStateFactory, translogFactorySupplier, clusterDefaultRefreshIntervalSupplier, - clusterRemoteTranslogBufferIntervalSupplier, - recoverySettings + recoverySettings, + remoteStoreSettings ); success = true; return indexService; diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 109bda65b1fd8..03cf8f9182211 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -94,6 +94,7 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; @@ -183,8 +184,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final ValuesSourceRegistry valuesSourceRegistry; private final BiFunction translogFactorySupplier; private final Supplier clusterDefaultRefreshIntervalSupplier; - private final Supplier clusterRemoteTranslogBufferIntervalSupplier; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; public IndexService( IndexSettings indexSettings, @@ -219,8 +220,8 @@ public IndexService( IndexStorePlugin.RecoveryStateFactory recoveryStateFactory, BiFunction translogFactorySupplier, 
Supplier clusterDefaultRefreshIntervalSupplier, - Supplier clusterRemoteTranslogBufferIntervalSupplier, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RemoteStoreSettings remoteStoreSettings ) { super(indexSettings); this.allowExpensiveQueries = allowExpensiveQueries; @@ -296,8 +297,8 @@ public IndexService( this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this); this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this); this.translogFactorySupplier = translogFactorySupplier; - this.clusterRemoteTranslogBufferIntervalSupplier = clusterRemoteTranslogBufferIntervalSupplier; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; updateFsyncTaskIfNecessary(); } @@ -549,9 +550,9 @@ public synchronized IndexShard createShard( this.indexSettings.isSegRepEnabledOrRemoteNode() ? checkpointPublisher : null, remoteStore, remoteStoreStatsTrackerFactory, - clusterRemoteTranslogBufferIntervalSupplier, nodeEnv.nodeId(), recoverySettings, + remoteStoreSettings, seedRemote ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 551dd338eed2a..f3a62cdf1436f 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -179,6 +179,7 @@ import org.opensearch.index.warmer.WarmerStats; import org.opensearch.indices.IndexingMemoryController; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; @@ -349,6 +350,7 @@ Runnable getGlobalCheckpointSyncer() { private final List internalRefreshListener = new ArrayList<>(); private final RemoteStoreFileDownloader fileDownloader; private final RecoverySettings recoverySettings; + private final RemoteStoreSettings remoteStoreSettings; /* On source doc rep node, It will be DOCREP_NON_MIGRATING. 
On source remote node , it will be REMOTE_MIGRATING_SEEDED when relocating from remote node @@ -381,9 +383,9 @@ public IndexShard( @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, @Nullable final Store remoteStore, final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, - final Supplier clusterRemoteTranslogBufferIntervalSupplier, final String nodeId, final RecoverySettings recoverySettings, + final RemoteStoreSettings remoteStoreSettings, boolean seedRemote ) throws IOException { super(shardRouting.shardId(), indexSettings); @@ -405,7 +407,7 @@ public IndexShard( threadPool, this::getEngine, indexSettings.isRemoteNode(), - () -> getRemoteTranslogUploadBufferInterval(clusterRemoteTranslogBufferIntervalSupplier) + () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; this.indexCache = indexCache; @@ -481,6 +483,7 @@ public boolean shouldCache(Query query) { : mapperService.documentMapper().mappers().containsTimeStampField(); this.remoteStoreStatsTrackerFactory = remoteStoreStatsTrackerFactory; this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); } @@ -598,6 +601,10 @@ public RecoverySettings getRecoverySettings() { return recoverySettings; } + public RemoteStoreSettings getRemoteStoreSettings() { + return remoteStoreSettings; + } + public RemoteStoreFileDownloader getFileDownloader() { return fileDownloader; } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index fb96102bc6094..351aec6e3af6c 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -224,7 +224,7 @@ private boolean syncSegments() { // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. if (isRefreshAfterCommit()) { - remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles()); + remoteDirectory.deleteStaleSegmentsAsync(indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles()); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { diff --git a/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java new file mode 100644 index 0000000000000..d3937600a848b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/DefaultRemoteStoreSettings.java @@ -0,0 +1,26 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; + +/** + * Utility to provide a {@link RemoteStoreSettings} instance containing all defaults + * + * @opensearch.internal + */ +public final class DefaultRemoteStoreSettings { + private DefaultRemoteStoreSettings() {} + + public static final RemoteStoreSettings INSTANCE = new RemoteStoreSettings( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) + ); +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 9bc81c1826c2d..28ad4b23e319c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -249,17 +249,6 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * Used to specify the default translog buffer interval for remote store backed indexes. - */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( - "cluster.remote_store.translog.buffer_interval", - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, - Property.NodeScope, - Property.Dynamic - ); - /** * This setting is used to set the refresh interval when the {@code index.refresh_interval} index setting is not * provided during index creation or when the existing {@code index.refresh_interval} index setting is set as null. @@ -366,7 +355,7 @@ public class IndicesService extends AbstractLifecycleComponent private volatile boolean idFieldDataEnabled; private volatile boolean allowExpensiveQueries; private final RecoverySettings recoverySettings; - + private final RemoteStoreSettings remoteStoreSettings; @Nullable private final OpenSearchThreadPoolExecutor danglingIndicesThreadPoolExecutor; private final Set danglingIndicesToWrite = Sets.newConcurrentHashSet(); @@ -375,8 +364,6 @@ public class IndicesService extends AbstractLifecycleComponent private final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory; private final BiFunction translogFactorySupplier; private volatile TimeValue clusterDefaultRefreshInterval; - private volatile TimeValue clusterRemoteTranslogBufferInterval; - private final SearchRequestStats searchRequestStats; @Override @@ -411,7 +398,8 @@ public IndicesService( SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, RecoverySettings recoverySettings, - CacheService cacheService + CacheService cacheService, + RemoteStoreSettings remoteStoreSettings ) { this.settings = settings; this.threadPool = threadPool; @@ -515,10 +503,8 @@ protected void closeInternal() { this.clusterDefaultRefreshInterval = CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING.get(clusterService.getSettings()); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING, this::onRefreshIntervalUpdate); - this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings() - .addSettingsUpdateConsumer(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, this::setClusterRemoteTranslogBufferInterval); this.recoverySettings = recoverySettings; + this.remoteStoreSettings = remoteStoreSettings; } /** @@ -923,8 +909,8 @@ private 
synchronized IndexService createIndexService( remoteDirectoryFactory, translogFactorySupplier, this::getClusterDefaultRefreshInterval, - this::getClusterRemoteTranslogBufferInterval, - this.recoverySettings + this.recoverySettings, + this.remoteStoreSettings ); } @@ -2044,12 +2030,7 @@ private TimeValue getClusterDefaultRefreshInterval() { return this.clusterDefaultRefreshInterval; } - // Exclusively for testing, please do not use it elsewhere. - public TimeValue getClusterRemoteTranslogBufferInterval() { - return clusterRemoteTranslogBufferInterval; - } - - private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { - this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + public RemoteStoreSettings getRemoteStoreSettings() { + return this.remoteStoreSettings; } } diff --git a/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java new file mode 100644 index 0000000000000..5e6dba2b398db --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/RemoteStoreSettings.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexSettings; + +/** + * Settings for remote store + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class RemoteStoreSettings { + + /** + * Used to specify the default translog buffer interval for remote store backed indexes. + */ + public static final Setting CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING = Setting.timeSetting( + "cluster.remote_store.translog.buffer_interval", + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + IndexSettings.MINIMUM_REMOTE_TRANSLOG_BUFFER_INTERVAL, + Property.NodeScope, + Property.Dynamic + ); + + /** + * Controls minimum number of metadata files to keep in remote segment store. + * {@code value < 1} will disable deletion of stale segment metadata files. 
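Both settings registered by this class are dynamic. A minimal sketch of flipping the retention count at runtime, mirroring the unit test added later in this patch (the value 5 is illustrative; imports elided):

    ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
    RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings);
    // The update consumers registered in the constructor propagate the new value to the volatile field.
    clusterSettings.applySettings(
        Settings.builder()
            .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5)
            .build()
    );
    assert remoteStoreSettings.getMinRemoteSegmentMetadataFiles() == 5;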
+ */ + public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( + "cluster.remote_store.index.segment_metadata.retention.max_count", + 10, + -1, + v -> { + if (v == 0) { + throw new IllegalArgumentException( + "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" + ); + } + }, + Property.NodeScope, + Property.Dynamic + ); + + private volatile TimeValue clusterRemoteTranslogBufferInterval; + private volatile int minRemoteSegmentMetadataFiles; + + public RemoteStoreSettings(Settings settings, ClusterSettings clusterSettings) { + this.clusterRemoteTranslogBufferInterval = CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, + this::setClusterRemoteTranslogBufferInterval + ); + + minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, + this::setMinRemoteSegmentMetadataFiles + ); + } + + // Exclusively for testing, please do not use it elsewhere. + public TimeValue getClusterRemoteTranslogBufferInterval() { + return clusterRemoteTranslogBufferInterval; + } + + private void setClusterRemoteTranslogBufferInterval(TimeValue clusterRemoteTranslogBufferInterval) { + this.clusterRemoteTranslogBufferInterval = clusterRemoteTranslogBufferInterval; + } + + private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { + this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; + } + + public int getMinRemoteSegmentMetadataFiles() { + return this.minRemoteSegmentMetadataFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java index 2b41eb125d808..53b42347aa30d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySettings.java @@ -159,25 +159,6 @@ public class RecoverySettings { Property.NodeScope ); - /** - * Controls minimum number of metadata files to keep in remote segment store. - * {@code value < 1} will disable deletion of stale segment metadata files. 
- */ - public static final Setting CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING = Setting.intSetting( - "cluster.remote_store.index.segment_metadata.retention.max_count", - 10, - -1, - v -> { - if (v == 0) { - throw new IllegalArgumentException( - "Value 0 is not allowed for this setting as it would delete all the data from remote segment store" - ); - } - }, - Property.NodeScope, - Property.Dynamic - ); - public static final Setting INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT = Setting.timeSetting( "indices.recovery.internal_remote_upload_timeout", new TimeValue(1, TimeUnit.HOURS), @@ -199,7 +180,6 @@ public class RecoverySettings { private volatile TimeValue internalActionTimeout; private volatile TimeValue internalActionRetryTimeout; private volatile TimeValue internalActionLongTimeout; - private volatile int minRemoteSegmentMetadataFiles; private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; private volatile TimeValue internalRemoteUploadTimeout; @@ -243,11 +223,6 @@ public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { this::setInternalActionLongTimeout ); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); - minRemoteSegmentMetadataFiles = CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING, - this::setMinRemoteSegmentMetadataFiles - ); clusterSettings.addSettingsUpdateConsumer(INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT, this::setInternalRemoteUploadTimeout); } @@ -354,11 +329,4 @@ private void setMaxConcurrentRemoteStoreStreams(int maxConcurrentRemoteStoreStre this.maxConcurrentRemoteStoreStreams = maxConcurrentRemoteStoreStreams; } - private void setMinRemoteSegmentMetadataFiles(int minRemoteSegmentMetadataFiles) { - this.minRemoteSegmentMetadataFiles = minRemoteSegmentMetadataFiles; - } - - public int getMinRemoteSegmentMetadataFiles() { - return this.minRemoteSegmentMetadataFiles; - } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index ea449afe1c811..8348b8f02d342 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -153,6 +153,7 @@ import org.opensearch.index.store.remote.filecache.FileCacheFactory; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.SystemIndices; @@ -788,6 +789,8 @@ protected Node( final RecoverySettings recoverySettings = new RecoverySettings(settings, settingsModule.getClusterSettings()); + final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(settings, settingsModule.getClusterSettings()); + final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( repositoriesServiceReference::get, threadPool @@ -825,7 +828,8 @@ protected Node( searchRequestStats, remoteStoreStatsTrackerFactory, recoverySettings, - cacheService + cacheService, + remoteStoreSettings ); final IngestService ingestService = new IngestService( diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index 97bc822be7d51..82d7ab06f126b 100644 
--- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -99,6 +99,7 @@ import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.analysis.AnalysisModule; @@ -261,8 +262,8 @@ private IndexService newIndexService(IndexModule module) throws IOException { new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier, () -> IndexSettings.DEFAULT_REFRESH_INTERVAL, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, - DefaultRecoverySettings.INSTANCE + DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE ); } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 85878cc2e1c9d..33f6c67b94b3d 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -34,7 +34,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory.MetadataFilenameUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; -import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.RemoteStoreSettings; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; @@ -249,7 +249,7 @@ public void testAfterMultipleCommits() throws IOException { setup(true, 3); assertDocs(indexShard, "1", "2", "3"); - for (int i = 0; i < indexShard.getRecoverySettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { + for (int i = 0; i < indexShard.getRemoteStoreSettings().getMinRemoteSegmentMetadataFiles() + 3; i++) { indexDocs(4 * (i + 1), 4); flushShard(indexShard); } @@ -635,9 +635,9 @@ private Tuple mockIn RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = indexShard.getRemoteStoreStatsTrackerFactory(); when(shard.indexSettings()).thenReturn(indexShard.indexSettings()); when(shard.shardId()).thenReturn(indexShard.shardId()); - RecoverySettings recoverySettings = mock(RecoverySettings.class); - when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); - when(shard.getRecoverySettings()).thenReturn(recoverySettings); + RemoteStoreSettings remoteStoreSettings = mock(RemoteStoreSettings.class); + when(remoteStoreSettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); + when(shard.getRemoteStoreSettings()).thenReturn(remoteStoreSettings); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index 742dbdeba8c5b..6757dbc184961 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ 
b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -622,6 +622,9 @@ public void testConflictingEngineFactories() { public void testClusterRemoteTranslogBufferIntervalDefault() { IndicesService indicesService = getIndicesService(); - assertEquals(IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, indicesService.getClusterRemoteTranslogBufferInterval()); + assertEquals( + IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, + indicesService.getRemoteStoreSettings().getClusterRemoteTranslogBufferInterval() + ); } } diff --git a/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java new file mode 100644 index 0000000000000..3809a44e55901 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/RemoteStoreSettingsDynamicUpdateTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchTestCase; + +public class RemoteStoreSettingsDynamicUpdateTests extends OpenSearchTestCase { + private final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final RemoteStoreSettings remoteStoreSettings = new RemoteStoreSettings(Settings.EMPTY, clusterSettings); + + public void testSegmentMetadataRetention() { + // Default value + assertEquals(10, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5) + .build() + ); + assertEquals(5, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting min value + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1) + .build() + ); + assertEquals(-1, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value > default (10) + clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15) + .build() + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value to 0 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + + // Setting value < -1 should fail and retain the existing value + assertThrows( + IllegalArgumentException.class, + () -> clusterSettings.applySettings( + Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) + .build() + ) + ); + assertEquals(15, remoteStoreSettings.getMinRemoteSegmentMetadataFiles()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java 
b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java index 18e7dfb375132..75639661f539d 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySettingsDynamicUpdateTests.java @@ -96,49 +96,4 @@ public void testInternalLongActionTimeout() { ); assertEquals(new TimeValue(duration, timeUnit), recoverySettings.internalActionLongTimeout()); } - - public void testSegmentMetadataRetention() { - // Default value - assertEquals(10, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 5).build() - ); - assertEquals(5, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting min value - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -1).build() - ); - assertEquals(-1, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value > default (10) - clusterSettings.applySettings( - Settings.builder().put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 15).build() - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value to 0 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), 0) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - - // Setting value < -1 should fail and retain the existing value - assertThrows( - IllegalArgumentException.class, - () -> clusterSettings.applySettings( - Settings.builder() - .put(RecoverySettings.CLUSTER_REMOTE_INDEX_SEGMENT_METADATA_RETENTION_MAX_COUNT_SETTING.getKey(), -5) - .build() - ) - ); - assertEquals(15, recoverySettings.getMinRemoteSegmentMetadataFiles()); - } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index e9bbf3d861408..4326e5fc63961 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -192,6 +192,7 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.filecache.FileCacheStats; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesModule; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; @@ -2077,7 +2078,8 @@ public void onFailure(final Exception e) { null, new RemoteStoreStatsTrackerFactory(clusterService, settings), DefaultRecoverySettings.INSTANCE, - new CacheModule(new ArrayList<>(), settings).getCacheService() + new CacheModule(new ArrayList<>(), settings).getCacheService(), + DefaultRemoteStoreSettings.INSTANCE ); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( diff --git 
a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index a2f9eb677c0ac..9baa6110ab54e 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -116,6 +116,7 @@ import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogFactory; +import org.opensearch.indices.DefaultRemoteStoreSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.indices.recovery.AsyncRecoveryTarget; @@ -708,9 +709,9 @@ protected IndexShard newShard( checkpointPublisher, remoteStore, remoteStoreStatsTrackerFactory, - () -> IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL, "dummy-node", DefaultRecoverySettings.INSTANCE, + DefaultRemoteStoreSettings.INSTANCE, false ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); From a103b84e157a12f8217e34fa41260de7ce13969f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 28 Mar 2024 10:33:18 -0400 Subject: [PATCH 22/56] [FEATURE] Broaden SecureSettingsFactory to include http transports (#12907) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../ssl/SecureNetty4HttpServerTransport.java | 78 +++++- .../transport/Netty4ModulePlugin.java | 5 +- .../netty4/ssl/SecureNetty4Transport.java | 5 +- ...HttpServerTransportConfigurationTests.java | 242 ++++++++++++++++++ .../SecureNetty4HttpServerTransportTests.java | 46 +--- .../ssl/SimpleSecureNetty4TransportTests.java | 34 +-- .../common/network/NetworkModule.java | 39 ++- .../main/java/org/opensearch/node/Node.java | 8 +- .../org/opensearch/plugins/NetworkPlugin.java | 2 +- .../SecureHttpTransportSettingsProvider.java | 55 ++++ .../plugins/SecureSettingsFactory.java | 7 + .../SecureTransportSettingsProvider.java | 57 ++--- .../plugins/TransportExceptionHandler.java | 30 +++ .../transport/TransportAdapterProvider.java | 40 +++ .../common/network/NetworkModuleTests.java | 102 ++++++-- 16 files changed, 596 insertions(+), 155 deletions(-) create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java create mode 100644 server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java create mode 100644 server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java create mode 100644 server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a18660a2d84c..de832cdc5c52f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,6 +116,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) +- Improve built-in secure transports support ([#12907](https://github.com/opensearch-project/OpenSearch/pull/12907)) ### Deprecated diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java index 51a76903e284d..978c92870bd75 100644 --- 
a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransport.java @@ -37,19 +37,27 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpHandlingSettings; +import org.opensearch.http.HttpServerTransport; import org.opensearch.http.netty4.Netty4HttpChannel; import org.opensearch.http.netty4.Netty4HttpServerTransport; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; import org.opensearch.transport.netty4.ssl.SslUtils; import javax.net.ssl.SSLEngine; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.DecoderException; import io.netty.handler.ssl.ApplicationProtocolNames; import io.netty.handler.ssl.ApplicationProtocolNegotiationHandler; @@ -59,9 +67,14 @@ * @see SecuritySSLNettyHttpServerTransport */ public class SecureNetty4HttpServerTransport extends Netty4HttpServerTransport { + public static final String REQUEST_HEADER_VERIFIER = "HeaderVerifier"; + public static final String REQUEST_DECOMPRESSOR = "RequestDecompressor"; + private static final Logger logger = LogManager.getLogger(SecureNetty4HttpServerTransport.class); - private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; + private final TransportExceptionHandler exceptionHandler; + private final ChannelInboundHandlerAdapter headerVerifier; + private final TransportAdapterProvider decompressorProvider; public SecureNetty4HttpServerTransport( final Settings settings, @@ -72,7 +85,7 @@ public SecureNetty4HttpServerTransport( final Dispatcher dispatcher, final ClusterSettings clusterSettings, final SharedGroupFactory sharedGroupFactory, - final SecureTransportSettingsProvider secureTransportSettingsProvider, + final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, final Tracer tracer ) { super( @@ -86,9 +99,45 @@ public SecureNetty4HttpServerTransport( sharedGroupFactory, tracer ); - this.secureTransportSettingsProvider = secureTransportSettingsProvider; - this.exceptionHandler = secureTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + + this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; + this.exceptionHandler = secureHttpTransportSettingsProvider.buildHttpServerExceptionHandler(settings, this) + .orElse(TransportExceptionHandler.NOOP); + + final List headerVerifiers = secureHttpTransportSettingsProvider.getHttpTransportAdapterProviders( + settings + ) + .stream() + .filter(p -> REQUEST_HEADER_VERIFIER.equalsIgnoreCase(p.name())) + .map(p -> p.create(settings, this, 
ChannelInboundHandlerAdapter.class)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (headerVerifiers.size() > 1) { + throw new IllegalArgumentException("Cannot have more than one header verifier configured, supplied " + headerVerifiers.size()); + } + + final Optional> decompressorProviderOpt = secureHttpTransportSettingsProvider + .getHttpTransportAdapterProviders(settings) + .stream() + .filter(p -> REQUEST_DECOMPRESSOR.equalsIgnoreCase(p.name())) + .findFirst(); + // There could be multiple request decompressor providers configured, using the first one + decompressorProviderOpt.ifPresent(p -> logger.debug("Using request decompressor provider: {}", p)); + + this.headerVerifier = headerVerifiers.isEmpty() ? null : headerVerifiers.get(0); + this.decompressorProvider = decompressorProviderOpt.orElseGet(() -> new TransportAdapterProvider() { + @Override + public String name() { + return REQUEST_DECOMPRESSOR; + } + + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.empty(); + } + }); } @Override @@ -152,7 +201,7 @@ protected SslHttpChannelHandler(final Netty4HttpServerTransport transport, final protected void initChannel(Channel ch) throws Exception { super.initChannel(ch); - final SSLEngine sslEngine = secureTransportSettingsProvider.buildSecureHttpServerEngine( + final SSLEngine sslEngine = secureHttpTransportSettingsProvider.buildSecureHttpServerEngine( settings, SecureNetty4HttpServerTransport.this ).orElseGet(SslUtils::createDefaultServerSSLEngine); @@ -166,4 +215,17 @@ protected void configurePipeline(Channel ch) { ch.pipeline().addLast(new Http2OrHttpHandler()); } } + + protected ChannelInboundHandlerAdapter createHeaderVerifier() { + if (headerVerifier != null) { + return headerVerifier; + } else { + return super.createHeaderVerifier(); + } + } + + @Override + protected ChannelInboundHandlerAdapter createDecompressor() { + return decompressorProvider.create(settings, this, ChannelInboundHandlerAdapter.class).orElseGet(super::createDecompressor); + } } diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java index 56163c18949a4..e2c84ab5d339a 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4ModulePlugin.java @@ -49,6 +49,7 @@ import org.opensearch.http.netty4.ssl.SecureNetty4HttpServerTransport; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; @@ -160,7 +161,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap( @@ -174,7 +175,7 @@ public Map> getSecureHttpTransports( dispatcher, clusterSettings, getSharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, tracer ) ); diff --git 
a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java index 9c63a1ab9161b..977121346dcc3 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/netty4/ssl/SecureNetty4Transport.java @@ -42,6 +42,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.indices.breaker.CircuitBreakerService; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.SharedGroupFactory; @@ -72,7 +73,7 @@ public class SecureNetty4Transport extends Netty4Transport { private static final Logger logger = LogManager.getLogger(SecureNetty4Transport.class); private final SecureTransportSettingsProvider secureTransportSettingsProvider; - private final SecureTransportSettingsProvider.ServerExceptionHandler exceptionHandler; + private final TransportExceptionHandler exceptionHandler; public SecureNetty4Transport( final Settings settings, @@ -100,7 +101,7 @@ public SecureNetty4Transport( this.secureTransportSettingsProvider = secureTransportSettingsProvider; this.exceptionHandler = secureTransportSettingsProvider.buildServerTransportExceptionHandler(settings, this) - .orElse(SecureTransportSettingsProvider.ServerExceptionHandler.NOOP); + .orElse(TransportExceptionHandler.NOOP); } @Override diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java new file mode 100644 index 0000000000000..1ab1ae4f5ddfd --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportConfigurationTests.java @@ -0,0 +1,242 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.http.netty4.ssl; + +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.http.NullDispatcher; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; +import org.opensearch.telemetry.tracing.noop.NoopTracer; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.SharedGroupFactory; +import org.opensearch.transport.TransportAdapterProvider; +import org.junit.After; +import org.junit.Before; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import io.netty.channel.ChannelInboundHandlerAdapter; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests for the {@link SecureNetty4HttpServerTransport} class. + */ +public class SecureNetty4HttpServerTransportConfigurationTests extends OpenSearchTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private ClusterSettings clusterSettings; + + private static class ConfigurableSecureHttpTransportSettingsProvider implements SecureHttpTransportSettingsProvider { + private final List> transportAdapterProviders; + + public ConfigurableSecureHttpTransportSettingsProvider( + List> transportAdapterProviders + ) { + this.transportAdapterProviders = transportAdapterProviders; + } + + @Override + public Collection> getHttpTransportAdapterProviders(Settings settings) { + return transportAdapterProviders; + } + + @Override + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + return Optional.empty(); + } + + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { + return Optional.empty(); + } + } + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + clusterSettings = null; + } + + public void testRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + 
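// constructing the transport is the assertion of this test: resolving the single
// supplied header verifier is expected to succeed without throwing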
Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testMultipleRequestHeaderVerifiers() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider, transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ); + + assertThat(ex.getMessage(), equalTo("Cannot have more than one header verifier configured, supplied 2")); + } + + public void testRequestDecompressor() throws InterruptedException { + final TransportAdapterProvider transportAdapterProvider = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(transportAdapterProvider)), + NoopTracer.INSTANCE + ) + ) { + + } + } + + public void testRequestDecompressorAndRequestHeaderVerifier() throws InterruptedException { + final TransportAdapterProvider requestDecompressor = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_DECOMPRESSOR; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + final TransportAdapterProvider requestHeaderVerifier = new TransportAdapterProvider() { + @Override + public String name() { + return SecureNetty4HttpServerTransport.REQUEST_HEADER_VERIFIER; + } + + @SuppressWarnings("unchecked") + @Override + public Optional create(Settings settings, HttpServerTransport transport, Class adapterClass) { + return Optional.of((C) new ChannelInboundHandlerAdapter()); + } + + }; + + try ( + final SecureNetty4HttpServerTransport transport = new SecureNetty4HttpServerTransport( + Settings.EMPTY, + networkService, + bigArrays, + threadPool, + xContentRegistry(), + new NullDispatcher(), + clusterSettings, + new SharedGroupFactory(Settings.EMPTY), + new ConfigurableSecureHttpTransportSettingsProvider(List.of(requestDecompressor, requestHeaderVerifier)), + 
NoopTracer.INSTANCE + ) + ) { + + } + } +} diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java index 9ea49d0b24d44..e79a066ad8f63 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/ssl/SecureNetty4HttpServerTransportTests.java @@ -29,7 +29,8 @@ import org.opensearch.http.HttpTransportSettings; import org.opensearch.http.NullDispatcher; import org.opensearch.http.netty4.Netty4HttpClient; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.rest.BytesRestResponse; import org.opensearch.rest.RestChannel; import org.opensearch.rest.RestRequest; @@ -40,7 +41,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.NettyAllocator; import org.opensearch.transport.SharedGroupFactory; -import org.opensearch.transport.TcpTransport; import org.opensearch.transport.netty4.ssl.TrustAllManager; import org.junit.After; import org.junit.Before; @@ -83,7 +83,6 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.ssl.ClientAuth; import io.netty.handler.ssl.SslContextBuilder; import static org.opensearch.core.rest.RestStatus.BAD_REQUEST; @@ -104,7 +103,7 @@ public class SecureNetty4HttpServerTransportTests extends OpenSearchTestCase { private ThreadPool threadPool; private MockBigArrays bigArrays; private ClusterSettings clusterSettings; - private SecureTransportSettingsProvider secureTransportSettingsProvider; + private SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; @Before public void setup() throws Exception { @@ -113,14 +112,9 @@ public void setup() throws Exception { bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - secureTransportSettingsProvider = new SecureTransportSettingsProvider() { + secureHttpTransportSettingsProvider = new SecureHttpTransportSettingsProvider() { @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { + public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { return Optional.empty(); } @@ -146,22 +140,6 @@ public Optional buildSecureHttpServerEngine(Settings settings, HttpSe throw new SSLException(ex); } } - - @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { - return Optional.empty(); - } - - @Override - public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { - return Optional.of( - SslContextBuilder.forClient() - .clientAuth(ClientAuth.NONE) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()) - ); - } }; } @@ -241,7 +219,7 @@ public void 
dispatchBadRequest(RestChannel channel, ThreadContext threadContext, dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -292,7 +270,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -312,7 +290,7 @@ public void testBindUnavailableAddress() { new NullDispatcher(), clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -366,7 +344,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -430,7 +408,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, clusterSettings, new SharedGroupFactory(Settings.EMPTY), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -487,7 +465,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { @@ -562,7 +540,7 @@ public void dispatchBadRequest(final RestChannel channel, final ThreadContext th dispatcher, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new SharedGroupFactory(settings), - secureTransportSettingsProvider, + secureHttpTransportSettingsProvider, NoopTracer.INSTANCE ) ) { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java index 0cae58b8efa2a..df3b005f40903 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/ssl/SimpleSecureNetty4TransportTests.java @@ -20,8 +20,8 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; -import org.opensearch.http.HttpServerTransport; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.transport.MockTransportService; import org.opensearch.test.transport.StubbableTransport; @@ -69,40 +69,12 @@ protected Transport build(Settings settings, final Version version, ClusterSetti NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); final SecureTransportSettingsProvider secureTransportSettingsProvider = new SecureTransportSettingsProvider() { @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { + public Optional buildServerTransportExceptionHandler(Settings settings, Transport transport) { return Optional.empty(); } @Override - public 
Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { - try { - final KeyStore keyStore = KeyStore.getInstance("PKCS12"); - keyStore.load( - SimpleSecureNetty4TransportTests.class.getResourceAsStream("/netty4-secure.jks"), - "password".toCharArray() - ); - - final KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance("SunX509"); - keyManagerFactory.init(keyStore, "password".toCharArray()); - - SSLEngine engine = SslContextBuilder.forServer(keyManagerFactory) - .trustManager(TrustAllManager.INSTANCE) - .build() - .newEngine(NettyAllocator.getAllocator()); - return Optional.of(engine); - } catch (final IOException | NoSuchAlgorithmException | UnrecoverableKeyException | KeyStoreException - | CertificateException ex) { - throw new SSLException(ex); - } - } - - @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { + public Optional buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException { try { final KeyStore keyStore = KeyStore.getInstance("PKCS12"); keyStore.load( diff --git a/server/src/main/java/org/opensearch/common/network/NetworkModule.java b/server/src/main/java/org/opensearch/common/network/NetworkModule.java index d0f5dd9e4581d..bb8da190a6f35 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkModule.java @@ -55,6 +55,8 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; import org.opensearch.tasks.RawTaskStatus; @@ -74,7 +76,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * A module to handle registering and binding all network related classes. 
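The hunk below switches the NetworkModule constructor from pre-resolved providers to a collection of SecureSettingsFactory instances, resolving at most one transport provider and at most one HTTP transport provider from them. For illustration, a minimal plugin-side factory might look like the following sketch against the interfaces in this patch (a real implementation would return TLS-aware providers instead of empty optionals):

import java.util.Optional;

import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SecureHttpTransportSettingsProvider;
import org.opensearch.plugins.SecureSettingsFactory;
import org.opensearch.plugins.SecureTransportSettingsProvider;

public class ExampleSecurityPlugin extends Plugin {
    @Override
    public Optional<SecureSettingsFactory> getSecureSettingFactory(Settings settings) {
        return Optional.of(new SecureSettingsFactory() {
            @Override
            public Optional<SecureTransportSettingsProvider> getSecureTransportSettingsProvider(Settings s) {
                return Optional.empty(); // a real plugin returns its node-to-node TLS provider here
            }

            @Override
            public Optional<SecureHttpTransportSettingsProvider> getSecureHttpTransportSettingsProvider(Settings s) {
                return Optional.empty(); // likewise for the HTTP transport flavor
            }
        });
    }
}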
@@ -173,13 +177,31 @@ public NetworkModule( ClusterSettings clusterSettings, Tracer tracer, List transportInterceptors, - Collection secureTransportSettingsProvider + Collection secureSettingsFactories ) { this.settings = settings; - if (secureTransportSettingsProvider.size() > 1) { + final Collection secureTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureTransportSettingsProviders.size() > 1) { + throw new IllegalArgumentException( + "there is more than one secure transport settings provider: " + secureTransportSettingsProviders + ); + } + + final Collection secureHttpTransportSettingsProviders = secureSettingsFactories.stream() + .map(p -> p.getSecureHttpTransportSettingsProvider(settings)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + + if (secureHttpTransportSettingsProviders.size() > 1) { throw new IllegalArgumentException( - "there is more than one secure transport settings provider: " + secureTransportSettingsProvider + "there is more than one secure HTTP transport settings provider: " + secureHttpTransportSettingsProviders ); } @@ -213,9 +235,9 @@ public NetworkModule( registerTransport(entry.getKey(), entry.getValue()); } - // Register any secure transports if available - if (secureTransportSettingsProvider.isEmpty() == false) { - final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProvider.iterator().next(); + // Register any HTTP secure transports if available + if (secureHttpTransportSettingsProviders.isEmpty() == false) { + final SecureHttpTransportSettingsProvider secureSettingProvider = secureHttpTransportSettingsProviders.iterator().next(); final Map> secureHttpTransportFactory = plugin.getSecureHttpTransports( settings, @@ -233,6 +255,11 @@ public NetworkModule( for (Map.Entry> entry : secureHttpTransportFactory.entrySet()) { registerHttpTransport(entry.getKey(), entry.getValue()); } + } + + // Register any secure transports if available + if (secureTransportSettingsProviders.isEmpty() == false) { + final SecureTransportSettingsProvider secureSettingProvider = secureTransportSettingsProviders.iterator().next(); final Map> secureTransportFactory = plugin.getSecureTransports( settings, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 8348b8f02d342..7fa2b6c8ff497 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -203,7 +203,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPipelinePlugin; import org.opensearch.plugins.SearchPlugin; -import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; @@ -950,9 +950,9 @@ protected Node( admissionControlService ); - final Collection secureTransportSettingsProviders = pluginsService.filterPlugins(Plugin.class) + final Collection secureSettingsFactories = pluginsService.filterPlugins(Plugin.class) .stream() - .map(p -> p.getSecureSettingFactory(settings).flatMap(f -> f.getSecureTransportSettingsProvider(settings))) + .map(p -> p.getSecureSettingFactory(settings)) 
.filter(Optional::isPresent) .map(Optional::get) .collect(Collectors.toList()); @@ -972,7 +972,7 @@ protected Node( clusterService.getClusterSettings(), tracer, transportInterceptors, - secureTransportSettingsProviders + secureSettingsFactories ); Collection>> indexTemplateMetadataUpgraders = pluginsService.filterPlugins( diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java index 679833c9f6e0d..138ef6f71280d 100644 --- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java @@ -139,7 +139,7 @@ default Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher dispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider, Tracer tracer ) { return Collections.emptyMap(); diff --git a/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java new file mode 100644 index 0000000000000..ff86cbc04e240 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/SecureHttpTransportSettingsProvider.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; +import org.opensearch.http.HttpServerTransport; +import org.opensearch.transport.TransportAdapterProvider; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; + +import java.util.Collection; +import java.util.Collections; +import java.util.Optional; + +/** + * A provider for security related settings for HTTP transports. 
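Implementations can also contribute named, transport-specific handlers through getHttpTransportAdapterProviders (declared below); SecureNetty4HttpServerTransport above resolves them by the REQUEST_HEADER_VERIFIER and REQUEST_DECOMPRESSOR names. A sketch of a header-verifying adapter provider, assuming Netty is available on the module's classpath (the verification logic itself is hypothetical):

import java.util.Optional;

import org.opensearch.common.settings.Settings;
import org.opensearch.http.HttpServerTransport;
import org.opensearch.transport.TransportAdapterProvider;

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class ExampleHeaderVerifierProvider implements TransportAdapterProvider<HttpServerTransport> {
    @Override
    public String name() {
        return "HeaderVerifier"; // matched case-insensitively against REQUEST_HEADER_VERIFIER
    }

    @SuppressWarnings("unchecked")
    @Override
    public <C> Optional<C> create(Settings settings, HttpServerTransport transport, Class<C> adapterClass) {
        if (adapterClass.isAssignableFrom(ChannelInboundHandlerAdapter.class) == false) {
            return Optional.empty(); // bail out unless the caller asked for a Netty inbound handler
        }
        return Optional.of((C) new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                // a real verifier would inspect (and possibly reject) request headers here
                ctx.fireChannelRead(msg);
            }
        });
    }
}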
+ * + * @opensearch.experimental + */ +@ExperimentalApi +public interface SecureHttpTransportSettingsProvider { + /** + * Collection of additional {@link TransportAdapterProvider}s that are specific to particular HTTP transport + * @param settings settings + * @return a collection of additional {@link TransportAdapterProvider}s + */ + default Collection> getHttpTransportAdapterProviders(Settings settings) { + return Collections.emptyList(); + } + + /** + * If supported, builds the {@link TransportExceptionHandler} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link TransportExceptionHandler} instance + */ + Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport); + + /** + * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance + * @param settings settings + * @param transport {@link HttpServerTransport} instance + * @return if supported, builds the {@link SSLEngine} instance + * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + */ + Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException; +} diff --git a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java index b98d9cf51c129..ec2276ecc62ef 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java +++ b/server/src/main/java/org/opensearch/plugins/SecureSettingsFactory.java @@ -26,4 +26,11 @@ public interface SecureSettingsFactory { * @return optionally, the instance of the {@link SecureTransportSettingsProvider} */ Optional getSecureTransportSettingsProvider(Settings settings); + + /** + * Creates (or provides pre-created) instance of the {@link SecureHttpTransportSettingsProvider} + * @param settings settings + * @return optionally, the instance of the {@link SecureHttpTransportSettingsProvider} + */ + Optional getSecureHttpTransportSettingsProvider(Settings settings); } diff --git a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java index 6d038ed30c8ff..5b7402a01f82d 100644 --- a/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java +++ b/server/src/main/java/org/opensearch/plugins/SecureTransportSettingsProvider.java @@ -10,12 +10,14 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; -import org.opensearch.http.HttpServerTransport; -import org.opensearch.transport.TcpTransport; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportAdapterProvider; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLException; +import java.util.Collection; +import java.util.Collections; import java.util.Optional; /** @@ -26,57 +28,30 @@ @ExperimentalApi public interface SecureTransportSettingsProvider { /** - * An exception handler for errors that might happen while secure transport handle the requests. 
- * - * @see SslExceptionHandler - * - * @opensearch.experimental - */ - @ExperimentalApi - @FunctionalInterface - interface ServerExceptionHandler { - static ServerExceptionHandler NOOP = t -> {}; - - /** - * Handler for errors happening during the server side processing of the requests - * @param t the error - */ - void onError(Throwable t); - } - - /** - * If supported, builds the {@link ServerExceptionHandler} instance for {@link HttpServerTransport} instance - * @param settings settings - * @param transport {@link HttpServerTransport} instance - * @return if supported, builds the {@link ServerExceptionHandler} instance - */ - Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport); - - /** - * If supported, builds the {@link ServerExceptionHandler} instance for {@link TcpTransport} instance + * Collection of additional {@link TransportAdapterProvider}s that are specific to particular transport * @param settings settings - * @param transport {@link TcpTransport} instance - * @return if supported, builds the {@link ServerExceptionHandler} instance + * @return a collection of additional {@link TransportAdapterProvider}s */ - Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport); + default Collection> getTransportAdapterProviders(Settings settings) { + return Collections.emptyList(); + } /** - * If supported, builds the {@link SSLEngine} instance for {@link HttpServerTransport} instance + * If supported, builds the {@link TransportExceptionHandler} instance for {@link Transport} instance * @param settings settings - * @param transport {@link HttpServerTransport} instance - * @return if supported, builds the {@link SSLEngine} instance - * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built + * @param transport {@link Transport} instance + * @return if supported, builds the {@link TransportExceptionHandler} instance */ - Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException; + Optional buildServerTransportExceptionHandler(Settings settings, Transport transport); /** - * If supported, builds the {@link SSLEngine} instance for {@link TcpTransport} instance + * If supported, builds the {@link SSLEngine} instance for {@link Transport} instance * @param settings settings - * @param transport {@link TcpTransport} instance + * @param transport {@link Transport} instance * @return if supported, builds the {@link SSLEngine} instance * @throws SSLException throws SSLException if the {@link SSLEngine} instance cannot be built */ - Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException; + Optional buildSecureServerTransportEngine(Settings settings, Transport transport) throws SSLException; /** * If supported, builds the {@link SSLEngine} instance for client transport instance diff --git a/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java new file mode 100644 index 0000000000000..a6b935a6b97bc --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TransportExceptionHandler.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * An exception handler for errors that might happen while secure transports handle requests. + * + * @see SslExceptionHandler + * + * @opensearch.experimental + */ +@ExperimentalApi +@FunctionalInterface +public interface TransportExceptionHandler { + static TransportExceptionHandler NOOP = t -> {}; + + /** + * Handler for errors happening during the server side processing of the requests + * @param t the error + */ + void onError(Throwable t); +} diff --git a/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java new file mode 100644 index 0000000000000..36dbd5a699b40 --- /dev/null +++ b/server/src/main/java/org/opensearch/transport/TransportAdapterProvider.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.settings.Settings; + +import java.util.Optional; + +/** + * Transport specific adapter providers which could be injected into the transport processing chain. The transport adapters + * are transport specific and do not have any common abstraction on top. + * @param transport type + * + * @opensearch.experimental + */ +@ExperimentalApi +public interface TransportAdapterProvider { + /** + * The name of this transport adapter provider (names are free-form). + * @return the name of this transport adapter provider + */ + String name(); + + /** + * Provides a new transport adapter of the required adapter class for the given transport instance.
+ * @param transport adapter class + * @param settings settings + * @param transport HTTP transport instance + * @param adapterClass required transport adapter class + * @return the non-empty {@link Optional} if the transport adapter could be created, empty one otherwise + */ + Optional create(Settings settings, T transport, Class adapterClass); +} diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index 1c607ca0dc98b..447377e372e61 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -47,13 +47,15 @@ import org.opensearch.http.HttpStats; import org.opensearch.http.NullDispatcher; import org.opensearch.plugins.NetworkPlugin; +import org.opensearch.plugins.SecureHttpTransportSettingsProvider; +import org.opensearch.plugins.SecureSettingsFactory; import org.opensearch.plugins.SecureTransportSettingsProvider; +import org.opensearch.plugins.TransportExceptionHandler; import org.opensearch.telemetry.tracing.Tracer; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TcpTransport; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; @@ -73,38 +75,60 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; +import static org.hamcrest.CoreMatchers.startsWith; + public class NetworkModuleTests extends OpenSearchTestCase { private ThreadPool threadPool; - private SecureTransportSettingsProvider secureTransportSettingsProvider; + private SecureSettingsFactory secureSettingsFactory; @Override public void setUp() throws Exception { super.setUp(); threadPool = new TestThreadPool(NetworkModuleTests.class.getName()); - secureTransportSettingsProvider = new SecureTransportSettingsProvider() { - @Override - public Optional buildHttpServerExceptionHandler(Settings settings, HttpServerTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildServerTransportExceptionHandler(Settings settings, TcpTransport transport) { - return Optional.empty(); - } - - @Override - public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) throws SSLException { - return Optional.empty(); - } + secureSettingsFactory = new SecureSettingsFactory() { @Override - public Optional buildSecureServerTransportEngine(Settings settings, TcpTransport transport) throws SSLException { - return Optional.empty(); + public Optional getSecureTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureTransportSettingsProvider() { + @Override + public Optional buildServerTransportExceptionHandler( + Settings settings, + Transport transport + ) { + return Optional.empty(); + } + + @Override + public Optional buildSecureServerTransportEngine(Settings settings, Transport transport) + throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) + throws SSLException { + return Optional.empty(); + } + }); } @Override - public Optional buildSecureClientTransportEngine(Settings settings, String hostname, int port) throws SSLException { - 
return Optional.empty(); + public Optional getSecureHttpTransportSettingsProvider(Settings settings) { + return Optional.of(new SecureHttpTransportSettingsProvider() { + @Override + public Optional buildSecureHttpServerEngine(Settings settings, HttpServerTransport transport) + throws SSLException { + return Optional.empty(); + } + + @Override + public Optional buildHttpServerExceptionHandler( + Settings settings, + HttpServerTransport transport + ) { + return Optional.empty(); + } + }); } }; } @@ -211,7 +235,7 @@ public Map> getSecureTransports( return Collections.singletonMap("custom-secure", custom); } }; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), plugin); + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), plugin); assertSame(custom, module.getTransportSupplier()); } @@ -222,7 +246,7 @@ public void testRegisterSecureHttpTransport() { .build(); Supplier custom = FakeHttpTransport::new; - NetworkModule module = newNetworkModule(settings, null, List.of(secureTransportSettingsProvider), new NetworkPlugin() { + NetworkModule module = newNetworkModule(settings, null, List.of(secureSettingsFactory), new NetworkPlugin() { @Override public Map> getSecureHttpTransports( Settings settings, @@ -234,7 +258,7 @@ public Map> getSecureHttpTransports( NetworkService networkService, HttpServerTransport.Dispatcher requestDispatcher, ClusterSettings clusterSettings, - SecureTransportSettingsProvider secureTransportSettingsProvider, + SecureHttpTransportSettingsProvider secureTransportSettingsProvider, Tracer tracer ) { return Collections.singletonMap("custom-secure", custom); @@ -595,7 +619,7 @@ private NetworkModule newNetworkModule( private NetworkModule newNetworkModule( Settings settings, List coreTransportInterceptors, - List secureTransportSettingsProviders, + List secureSettingsFactories, NetworkPlugin... 
plugins ) { return new NetworkModule( @@ -612,7 +636,33 @@ private NetworkModule newNetworkModule( new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), NoopTracer.INSTANCE, coreTransportInterceptors, - secureTransportSettingsProviders + secureSettingsFactories + ); + } + + public void testRegisterSecureTransportMultipleProviers() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom-secure").build(); + Supplier custom = () -> null; // content doesn't matter we check reference equality + NetworkPlugin plugin = new NetworkPlugin() { + @Override + public Map> getSecureTransports( + Settings settings, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService, + SecureTransportSettingsProvider secureTransportSettingsProvider, + Tracer tracer + ) { + return Collections.singletonMap("custom-secure", custom); + } + }; + + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> newNetworkModule(settings, null, List.of(secureSettingsFactory, secureSettingsFactory), plugin) ); + assertThat(ex.getMessage(), startsWith("there is more than one secure transport settings provider")); } } From eba3b572a2ed778ac62cce661a894c7f561ecd48 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Thu, 28 Mar 2024 14:52:08 -0700 Subject: [PATCH 23/56] Disable concurrent segment search for system indices and throttled search requests (#12954) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../search/DefaultSearchContext.java | 9 +- .../search/DefaultSearchContextTests.java | 83 ++++++++++++++++++- 3 files changed, 90 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index de832cdc5c52f..f44c993f6faa0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) +- [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index 061aa2f6e5896..c76ea71c0a094 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -962,6 +962,12 @@ public BucketCollectorProcessor bucketCollectorProcessor() { * false: otherwise */ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearchExecutor) { + // Do not use concurrent segment search for system indices or throttled requests. 
See: + // https://github.com/opensearch-project/OpenSearch/issues/12951 + if (indexShard.isSystem() || indexShard.indexSettings().isSearchThrottled()) { + return false; + } + if ((clusterService != null) && (concurrentSearchExecutor != null)) { return indexService.getIndexSettings() .getSettings() @@ -969,9 +975,8 @@ private boolean evaluateConcurrentSegmentSearchSettings(Executor concurrentSearc IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) ); - } else { - return false; } + return false; } @Override diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 3793249d569f0..a1a808c9faa9b 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -93,6 +93,7 @@ import java.util.function.Function; import java.util.function.Supplier; +import static org.opensearch.index.IndexSettings.INDEX_SEARCH_THROTTLED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; @@ -168,6 +169,7 @@ public void testPreProcess() throws Exception { IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -486,6 +488,14 @@ public void testClearQueryCancellationsOnClose() throws IOException { when(indexService.newQueryShardContext(eq(shardId.id()), any(), any(), nullable(String.class), anyBoolean())).thenReturn( queryShardContext ); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); @@ -551,7 +561,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } } - public void testSearchPathEvaluationUsingSortField() throws Exception { + public void testSearchPathEvaluation() throws Exception { ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); when(shardSearchRequest.searchType()).thenReturn(SearchType.DEFAULT); ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); @@ -578,9 +588,24 @@ public void testSearchPathEvaluationUsingSortField() throws Exception { IndexMetadata indexMetadata = IndexMetadata.builder("index").settings(settings).build(); IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); when(indexService.getIndexSettings()).thenReturn(indexSettings); + when(indexShard.indexSettings()).thenReturn(indexSettings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + IndexShard systemIndexShard = mock(IndexShard.class); + 
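// The mocks below exercise the two new short-circuits in evaluateConcurrentSegmentSearchSettings:
// a system index and a search-throttled index must both disable concurrent segment search,
// regardless of the usual index-over-cluster setting precedence that would otherwise apply,
// e.g. (illustrative only; the key is defined by IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING):
//
//   Settings.builder().put("index.search.concurrent_segment_search.enabled", true).build();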
when(systemIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(systemIndexShard.getThreadPool()).thenReturn(threadPool); + when(systemIndexShard.isSystem()).thenReturn(true); + + IndexShard throttledIndexShard = mock(IndexShard.class); + when(throttledIndexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(throttledIndexShard.getThreadPool()).thenReturn(threadPool); + IndexSettings throttledIndexSettings = new IndexSettings( + indexMetadata, + Settings.builder().put(INDEX_SEARCH_THROTTLED.getKey(), true).build() + ); + when(throttledIndexShard.indexSettings()).thenReturn(throttledIndexSettings); + try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { final Supplier searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { @@ -697,6 +722,62 @@ protected Engine.Searcher acquireSearcherInternal(String source) { } assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // Case 4: With a system index concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + systemIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + + // Case 5: When search is throttled concurrent segment search is not used + readerContext = new ReaderContext( + newContextId(), + indexService, + throttledIndexShard, + searcherSupplier.get(), + randomNonNegativeLong(), + false + ); + context = new DefaultSearchContext( + readerContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + null, + null, + false, + Version.CURRENT, + false, + executor, + null + ); + context.evaluateRequestShouldUseConcurrentSearch(); + assertFalse(context.shouldUseConcurrentSearch()); + assertThrows(SetOnce.AlreadySetException.class, context::evaluateRequestShouldUseConcurrentSearch); + // shutdown the threadpool threadPool.shutdown(); } From 8426e14371ce15a7ecb80ff35c109e5830027888 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Thu, 28 Mar 2024 20:43:00 -0700 Subject: [PATCH 24/56] Update version checks for shard search idle metric after backport to 2.x (#12972) --- .../rest-api-spec/test/cat.shards/10_basic.yml | 10 +++++----- .../org/opensearch/index/search/stats/SearchStats.java | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index c309f19b454e7..989ea6b93f47f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,13 +1,13 @@ "Help": - skip: - version: " - 2.99.99" + version: " - 2.13.99" reason: search idle reactivate count total is only added in 3.0.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "3.0.0 - " + version: "2.14.0 - " - match: $body: | @@ -93,16 +93,16 @@ docs.deleted .+ \n $/ --- -"Help from 2.12.0 to 2.99.99": +"Help from 2.12.0 to 2.13.99": - skip: - version: " - 2.11.99 
, 3.0.0 - " + version: " - 2.11.99 , 2.14.0 - " reason: deleted docs and concurrent search are added in 2.12.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.12.0 - 2.99.99" + version: "2.12.0 - 2.13.99" - match: $body: | diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index d3a53fcc0e2d8..bb61e1afa05f4 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -261,7 +261,7 @@ private Stats(StreamInput in) throws IOException { queryConcurrency = in.readVLong(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_14_0)) { searchIdleReactivateCount = in.readVLong(); } } @@ -475,7 +475,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(queryConcurrency); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_14_0)) { out.writeVLong(searchIdleReactivateCount); } } From 62776d1bd007626f270cc263e1aafabc2c80c063 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 10:41:18 -0400 Subject: [PATCH 25/56] Bump commons-io:commons-io from 2.15.1 to 2.16.0 in /plugins/repository-hdfs (#12996) * Bump commons-io:commons-io in /plugins/repository-hdfs Bumps commons-io:commons-io from 2.15.1 to 2.16.0. --- updated-dependencies: - dependency-name: commons-io:commons-io dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f44c993f6faa0..38246f566cf6e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `asm` from 9.6 to 9.7 ([#12908](https://github.com/opensearch-project/OpenSearch/pull/12908)) - Bump `net.minidev:json-smart` from 2.5.0 to 2.5.1 ([#12893](https://github.com/opensearch-project/OpenSearch/pull/12893)) - Bump `netty` from 4.1.107.Final to 4.1.108.Final ([#12924](https://github.com/opensearch-project/OpenSearch/pull/12924)) +- Bump `commons-io:commons-io` from 2.15.1 to 2.16.0 ([#12996](https://github.com/opensearch-project/OpenSearch/pull/12996)) ### Changed - [BWC and API enforcement] Enforcing the presence of API annotations at build time ([#12872](https://github.com/opensearch-project/OpenSearch/pull/12872)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index af07fa9e5d80b..6faf0383d3ba2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -75,7 +75,7 @@ dependencies { api 'commons-collections:commons-collections:3.2.2' api 
"org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.10.1' - api 'commons-io:commons-io:2.15.1' + api 'commons-io:commons-io:2.16.0' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' diff --git a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 deleted file mode 100644 index 47c5d13812a36..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 new file mode 100644 index 0000000000000..6a7b638719fa3 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.16.0.jar.sha1 @@ -0,0 +1 @@ +27875a7935f1ddcc13267eb6fae1f719e0409572 \ No newline at end of file From 1ec49bd3aee6cd4463ae91ffb6c09054d1bbc02b Mon Sep 17 00:00:00 2001 From: Mohammad Hasnain Mohsin Rajan Date: Tue, 2 Apr 2024 03:38:49 +0530 Subject: [PATCH 26/56] feat: constant keyword field (#12285) Constant keyword fields behave similarly to regular keyword fields, except that they are defined only in the index mapping, and all documents in the index appear to have the same value for the constant keyword field. --------- Signed-off-by: Mohammad Hasnain --- CHANGELOG.md | 1 + .../mapper/ConstantKeywordFieldMapper.java | 191 ++++++++++++++++++ .../org/opensearch/indices/IndicesModule.java | 2 + .../ConstantKeywordFieldMapperTests.java | 114 +++++++++++ .../mapper/ConstantKeywordFieldTypeTests.java | 93 +++++++++ .../terms/SignificantTextAggregatorTests.java | 4 +- .../aggregations/AggregatorTestCase.java | 5 + 7 files changed, 409 insertions(+), 1 deletion(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 38246f566cf6e..2a43b676ce166 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Constant Keyword Field ([#12285](https://github.com/opensearch-project/OpenSearch/pull/12285)) - Convert ingest processor supports ip type ([#12818](https://github.com/opensearch-project/OpenSearch/pull/12818)) - Add a counter to node stat api to track shard going from idle to non-idle ([#12768](https://github.com/opensearch-project/OpenSearch/pull/12768)) - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) diff --git a/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java new file mode 100644 index 0000000000000..f4730c70362d1 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/ConstantKeywordFieldMapper.java @@ -0,0 +1,191 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.regex.Regex; +import org.opensearch.index.fielddata.IndexFieldData; +import org.opensearch.index.fielddata.plain.ConstantIndexFieldData; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.aggregations.support.CoreValuesSourceType; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +/** + * Field mapper for the constant_keyword type + * + * @opensearch.api + */ +@PublicApi(since = "2.14.0") +public class ConstantKeywordFieldMapper extends ParametrizedFieldMapper { + + public static final String CONTENT_TYPE = "constant_keyword"; + + private static final String valuePropertyName = "value"; + + /** + * A {@link Mapper.TypeParser} for the constant keyword field. + * + * @opensearch.internal + */ + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder<?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { + if (!node.containsKey(valuePropertyName)) { + throw new OpenSearchParseException("Field [" + name + "] is missing required parameter [value]"); + } + Object value = node.remove(valuePropertyName); + if (!(value instanceof String)) { + throw new OpenSearchParseException("Field [" + name + "] is expected to be a string value"); + } + return new Builder(name, (String) value); + } + } + + private static ConstantKeywordFieldMapper toType(FieldMapper in) { + return (ConstantKeywordFieldMapper) in; + } + + /** + * Builder for the constant keyword field mapper + * + * @opensearch.internal + */ + public static class Builder extends ParametrizedFieldMapper.Builder { + + private final Parameter<String> value; + + public Builder(String name, String value) { + super(name); + this.value = Parameter.stringParam(valuePropertyName, false, m -> toType(m).value, value); + } + + @Override + public List<Parameter<?>> getParameters() { + return Arrays.asList(value); + } + + @Override + public ConstantKeywordFieldMapper build(BuilderContext context) { + return new ConstantKeywordFieldMapper( + name, + new ConstantKeywordFieldMapper.ConstantKeywordFieldType(buildFullName(context), value.getValue()), + multiFieldsBuilder.build(this, context), + copyTo.build(), + this + ); + } + } + + /** + * Field type for the constant keyword field mapper + * + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + protected static final class ConstantKeywordFieldType extends ConstantFieldType { + + protected final String value; + + public ConstantKeywordFieldType(String name, String value) { + super(name, Collections.emptyMap()); + this.value = value; + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + protected boolean matches(String pattern, boolean caseInsensitive, QueryShardContext context) { + return Regex.simpleMatch(pattern, value, caseInsensitive); + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new MatchAllDocsQuery(); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) { + return new ConstantIndexFieldData.Builder(fullyQualifiedIndexName, name(), CoreValuesSourceType.BYTES); + } + +
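+    // Editorial illustration, not part of the original patch (field and value
+    // names here are hypothetical): with a mapping such as
+    //   "properties": { "env": { "type": "constant_keyword", "value": "prod" } }
+    // every document in the index behaves as if env == "prod"; parseCreateField
+    // below rejects any document that supplies a different value for the field.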
@Override + public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchLookup, String format) { + if (format != null) { + throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't " + "support formats."); + } + + return new SourceValueFetcher(name(), context) { + @Override + protected Object parseSourceValue(Object value) { + String keywordValue = value.toString(); + return Collections.singletonList(keywordValue); + } + }; + } + } + + private final String value; + + protected ConstantKeywordFieldMapper( + String simpleName, + MappedFieldType mappedFieldType, + MultiFields multiFields, + CopyTo copyTo, + ConstantKeywordFieldMapper.Builder builder + ) { + super(simpleName, mappedFieldType, multiFields, copyTo); + this.value = builder.value.getValue(); + } + + public ParametrizedFieldMapper.Builder getMergeBuilder() { + return new ConstantKeywordFieldMapper.Builder(simpleName(), this.value).init(this); + } + + @Override + protected void parseCreateField(ParseContext context) throws IOException { + + final String value; + if (context.externalValueSet()) { + value = context.externalValue().toString(); + } else { + value = context.parser().textOrNull(); + } + if (value == null) { + throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value"); + } + + if (!value.equals(fieldType().value)) { + throw new IllegalArgumentException("constant keyword field [" + name() + "] must have a value of [" + this.value + "]"); + } + + } + + @Override + public ConstantKeywordFieldMapper.ConstantKeywordFieldType fieldType() { + return (ConstantKeywordFieldMapper.ConstantKeywordFieldType) super.fieldType(); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index b86e98f4ebcbc..fee2888c7a3fb 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -46,6 +46,7 @@ import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.BooleanFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.DocCountFieldMapper; @@ -168,6 +169,7 @@ public static Map<String, Mapper.TypeParser> getMappers(List<MapperPlugin> mappe mappers.put(FieldAliasMapper.CONTENT_TYPE, new FieldAliasMapper.TypeParser()); mappers.put(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); mappers.put(FlatObjectFieldMapper.CONTENT_TYPE, FlatObjectFieldMapper.PARSER); + mappers.put(ConstantKeywordFieldMapper.CONTENT_TYPE, new ConstantKeywordFieldMapper.TypeParser()); for (MapperPlugin mapperPlugin : mapperPlugins) { for (Map.Entry<String, Mapper.TypeParser> entry : mapperPlugin.getMappers().entrySet()) { diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java new file mode 100644 index 0000000000000..65dd3b6447663 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldMapperTests.java @@ -0,0 +1,114 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the
Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexableField; +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.compress.CompressedXContent; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.IndexService; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +import static org.hamcrest.Matchers.containsString; + +public class ConstantKeywordFieldMapperTests extends OpenSearchSingleNodeTestCase { + + private IndexService indexService; + private DocumentMapperParser parser; + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(InternalSettingsPlugin.class); + } + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + public void testDefaultDisabledIndexMapper() throws Exception { + + XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "constant_keyword") + .field("value", "default_value") + .endObject() + .startObject("field2") + .field("type", "keyword") + .endObject(); + mapping = mapping.endObject().endObject().endObject(); + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping.toString())); + + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> { + b.field("field", "sdf"); + b.field("field2", "szdfvsddf"); + }))); + assertThat( + e.getMessage(), + containsString( + "failed to parse field [field] of type [constant_keyword] in document with id '1'.
Preview of field's value: 'sdf'" + ) + ); + + final ParsedDocument doc = mapper.parse(source(b -> { + b.field("field", "default_value"); + b.field("field2", "field_2_value"); + })); + + final IndexableField field = doc.rootDoc().getField("field"); + + // constantKeywordField should not be stored + assertNull(field); + } + + public void testMissingDefaultIndexMapper() throws Exception { + + final XContentBuilder mapping = XContentFactory.jsonBuilder() + .startObject() + .startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "constant_keyword") + .endObject() + .startObject("field2") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .endObject(); + + OpenSearchParseException e = expectThrows( + OpenSearchParseException.class, + () -> parser.parse("type", new CompressedXContent(mapping.toString())) + ); + assertThat(e.getMessage(), containsString("Field [field] is missing required parameter [value]")); + } + + private final SourceToParse source(CheckedConsumer<XContentBuilder, IOException> build) throws IOException { + XContentBuilder builder = JsonXContent.contentBuilder().startObject(); + build.accept(builder); + builder.endObject(); + return new SourceToParse("test", "1", BytesReference.bytes(builder), MediaTypeRegistry.JSON); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java new file mode 100644 index 0000000000000..235811539a299 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/ConstantKeywordFieldTypeTests.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.query.QueryShardContext; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +public class ConstantKeywordFieldTypeTests extends FieldTypeTestCase { + + final ConstantKeywordFieldMapper.ConstantKeywordFieldType ft = new ConstantKeywordFieldMapper.ConstantKeywordFieldType( + "field", + "default" + ); + + public void testTermQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termQuery("default", createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termQuery("not_default", createContext())); + } + + public void testTermsQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termsQuery(Arrays.asList("default", "not_default"), createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(Arrays.asList("no_default", "not_default"), createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termsQuery(List.of(), createContext())); + } + + public void testInsensitiveTermQuery() { + assertEquals(new MatchAllDocsQuery(), ft.termQueryCaseInsensitive("defaUlt", createContext())); + assertEquals(new MatchNoDocsQuery(), ft.termQueryCaseInsensitive("not_defaUlt", createContext())); + } + + public void testPrefixQuery() { + assertEquals(new MatchAllDocsQuery(), ft.prefixQuery("defau", null, createContext())); + assertEquals(new MatchNoDocsQuery(), ft.prefixQuery("not_default", null, createContext())); + } + + public void testWildcardQuery() { + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*lt", null, createContext())); + assertEquals(new MatchNoDocsQuery(), ft.wildcardQuery("no_defa*lt", null, createContext())); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("defa*", null, createContext())); + assertEquals(new MatchAllDocsQuery(), ft.wildcardQuery("*ult", null, createContext())); + + } + + public void testExistsQuery() { + assertEquals(new MatchAllDocsQuery(), ft.existsQuery(createContext())); + } + + private QueryShardContext createContext() { + IndexMetadata indexMetadata = IndexMetadata.builder("index") + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY); + + Predicate<String> indexNameMatcher = pattern -> Regex.simpleMatch(pattern, "index"); + return new QueryShardContext( + 0, + indexSettings, + null, + null, + null, + null, + null, + null, + xContentRegistry(), + writableRegistry(), + null, + null, + System::currentTimeMillis, + null, + indexNameMatcher, + () -> true, + null + ); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java index e9b2d40fd4ede..644cee57bd5a4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorTests.java @@ -50,6 +50,7 @@ import org.opensearch.index.analysis.AnalyzerScope; import
org.opensearch.index.analysis.NamedAnalyzer; import org.opensearch.index.mapper.BinaryFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.FlatObjectFieldMapper; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -104,7 +105,8 @@ protected List<String> unsupportedMappedFieldTypes() { return Arrays.asList( BinaryFieldMapper.CONTENT_TYPE, // binary fields are not supported because they do not have analyzers GeoPointFieldMapper.CONTENT_TYPE, // geopoint fields cannot use term queries - FlatObjectFieldMapper.CONTENT_TYPE // flat_object fields are not supported aggregations + FlatObjectFieldMapper.CONTENT_TYPE, // flat_object fields are not supported in aggregations + ConstantKeywordFieldMapper.CONTENT_TYPE // constant_keyword fields are not supported because they do not have analyzers ); } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 4eb49ebb42241..f83163bd139cd 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -95,6 +95,7 @@ import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; +import org.opensearch.index.mapper.ConstantKeywordFieldMapper; import org.opensearch.index.mapper.ContentPath; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.FieldAliasMapper; @@ -778,6 +779,10 @@ public void testSupportedFieldTypes() throws IOException { source.put("doc_values", "true"); } + + if (mappedType.getKey().equals(ConstantKeywordFieldMapper.CONTENT_TYPE) == true) { + source.put("value", "default_value"); + } + Mapper.Builder builder = mappedType.getValue().parse(fieldName, source, new MockParserContext()); FieldMapper mapper = (FieldMapper) builder.build(new BuilderContext(settings, new ContentPath())); From 37569bad2665f0455fea39d6efc8f852bcab7cce Mon Sep 17 00:00:00 2001 From: Mrudhul Guda Date: Tue, 2 Apr 2024 03:53:56 +0530 Subject: [PATCH 27/56] FIX: UOE While building Exists query for nested search_as_you_type field (#12048) The "exists" query on an object field would fail when a "search_as_you_type" field is nested under that object. It would also fail for a text field with prefixes enabled nested under the object, or any other field with a "hidden" subfield.
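As a sketch of the failing case (the index layout and field names "obj" and "obj.typeahead" are hypothetical, not taken from this change): an object field containing a search_as_you_type subfield also produces hidden subfields such as "obj.typeahead._2gram" and "obj.typeahead._index_prefix", and an exists query on the parent object previously tried to build a per-field exists query for each of them.

import org.opensearch.index.query.QueryBuilder;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

public class ExistsOnObjectFieldSketch {
    public static void main(String[] args) {
        // "obj" is a hypothetical object field with a search_as_you_type subfield.
        // Before this fix, expanding "obj.*" reached hidden subfields like
        // "obj.typeahead._index_prefix" and threw UnsupportedOperationException;
        // the fix skips subfields whose last name segment starts with '_'.
        QueryBuilder query = QueryBuilders.existsQuery("obj");
        System.out.println(new SearchSourceBuilder().query(query));
    }
}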
--------- Signed-off-by: Mrudhul Guda --- CHANGELOG.md | 1 + .../SearchAsYouTypeFieldMapperTests.java | 27 +++++++++++++++++++ .../index/query/ExistsQueryBuilder.java | 5 ++++ .../index/mapper/MapperServiceTestCase.java | 3 +++ 4 files changed, 36 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2a43b676ce166..e8e39d3b5ee00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,6 +98,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) - Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) +- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index b5f687ce34d4b..f55ad2e9d659c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -47,6 +47,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; @@ -68,6 +69,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.QueryStringQueryBuilder; import org.opensearch.plugins.Plugin; import java.io.IOException; @@ -541,6 +543,31 @@ public void testMatchPhrase() throws IOException { } } + public void testNestedExistsQuery() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", "object"); + b.startObject("properties"); + { + b.startObject("nested_field"); + { + b.field("type", "search_as_you_type"); + } + b.endObject(); + } + b.endObject(); + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + Query actual = new QueryStringQueryBuilder("field:*").toQuery(queryShardContext); + Query expected = new ConstantScoreQuery( + new BooleanQuery.Builder().add(new NormsFieldExistsQuery("field.nested_field"), BooleanClause.Occur.SHOULD).build() + ); + assertEquals(expected, actual); + } + private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List<String> terms) { final BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int i = 0; i < terms.size() - 1; i++) { diff --git a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java index 7fd83d5753512..3011a48fbb296 100644 --- a/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java +++
b/server/src/main/java/org/opensearch/index/query/ExistsQueryBuilder.java @@ -201,6 +201,11 @@ private static Query newObjectFieldExistsQuery(QueryShardContext context, String BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder(); Collection<String> fields = context.simpleMatchToIndexNames(objField + ".*"); for (String field : fields) { + int dotPos = field.lastIndexOf('.'); + if (dotPos > 0 && field.charAt(dotPos + 1) == '_') { + // This is a subfield (e.g. prefix) of a complex field type. Skip it. + continue; + } Query existsQuery = context.getMapperService().fieldType(field).existsQuery(context); booleanQuery.add(existsQuery, Occur.SHOULD); } diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index a65ce3cbdd380..b3d163db8c222 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -247,6 +247,9 @@ protected QueryShardContext createQueryShardContext(MapperService mapperService) when(queryShardContext.getSearchQuoteAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getSearchAnalyzer(any())).thenCallRealMethod(); when(queryShardContext.getIndexSettings()).thenReturn(mapperService.getIndexSettings()); + when(queryShardContext.getObjectMapper(anyString())).thenAnswer( + inv -> mapperService.getObjectMapper(inv.getArguments()[0].toString()) + ); when(queryShardContext.simpleMatchToIndexNames(any())).thenAnswer( inv -> mapperService.simpleMatchToFullName(inv.getArguments()[0].toString()) ); From c25b00c6b62fd6766999c09fd06dd1f7b281b3eb Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Tue, 2 Apr 2024 00:11:11 +0000 Subject: [PATCH 28/56] Move changelog entry down to 2.x block (#13004) We need to update our PR template to suggest that people add things to "unreleased 2.x" unless they definitely want to leave things for 3.0.
Signed-off-by: Michael Froh --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e8e39d3b5ee00..bc3acfc991e25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,7 +98,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Bug] Check phase name before SearchRequestOperationsListener onPhaseStart ([#12035](https://github.com/opensearch-project/OpenSearch/pull/12035)) - Fix Span operation names generated from RestActions ([#12005](https://github.com/opensearch-project/OpenSearch/pull/12005)) - Fix error in RemoteSegmentStoreDirectory when debug logging is enabled ([#12328](https://github.com/opensearch-project/OpenSearch/pull/12328)) -- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security @@ -128,6 +127,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix issue with feature flags where default value may not be honored ([#12849](https://github.com/opensearch-project/OpenSearch/pull/12849)) +- Fix UOE While building Exists query for nested search_as_you_type field ([#12048](https://github.com/opensearch-project/OpenSearch/pull/12048)) ### Security From 2dc071ff2546b0c6483b9e0a382badf9ac353cdc Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Mon, 1 Apr 2024 22:18:37 -0500 Subject: [PATCH 29/56] Detect breaking changes on pull requests (#12974) --- .github/workflows/detect-breaking-change.yml | 24 ++++++ CHANGELOG.md | 1 + server/build.gradle | 79 ++++++++++++++++++++ 3 files changed, 104 insertions(+) create mode 100644 .github/workflows/detect-breaking-change.yml diff --git a/.github/workflows/detect-breaking-change.yml b/.github/workflows/detect-breaking-change.yml new file mode 100644 index 0000000000000..fec605c58f9c7 --- /dev/null +++ b/.github/workflows/detect-breaking-change.yml @@ -0,0 +1,24 @@ +name: "Detect Breaking Changes" +on: [push, pull_request] +jobs: + detect-breaking-change: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: temurin # Temurin is a distribution of adoptium + java-version: 21 + - uses: gradle/gradle-build-action@v3 + with: + arguments: japicmp + gradle-version: 8.7 + build-root-directory: server + - if: failure() + run: cat server/build/reports/java-compatibility/report.txt + - if: failure() + uses: actions/upload-artifact@v4 + with: + name: java-compatibility-report.html + path: ${{ github.workspace }}/server/build/reports/java-compatibility/report.html + \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index bc3acfc991e25..d2b41e910762b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) ### Dependencies - Bump 
`org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/build.gradle b/server/build.gradle index bbbe93bd6e517..7d52849844aaa 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -36,6 +36,7 @@ plugins { id('opensearch.publish') id('opensearch.internal-cluster-test') id('opensearch.optional-dependencies') + id('me.champeau.gradle.japicmp') version '0.4.2' } publishing { @@ -378,3 +379,81 @@ tasks.named("sourcesJar").configure { duplicatesStrategy = DuplicatesStrategy.EXCLUDE } } + +/** Compares the current build against a snapshot build */ +tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) { + oldClasspath.from(files("${buildDir}/snapshot/opensearch-${version}.jar")) + newClasspath.from(tasks.named('jar')) + onlyModified = true + failOnModification = true + ignoreMissingClasses = true + annotationIncludes = ['@org.opensearch.common.annotation.PublicApi'] + txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt") + htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html") + dependsOn downloadSnapshot +} + +/** If the Java API Comparison task failed, print a hint that the change may need to be merged from its target branch */ +gradle.taskGraph.afterTask { Task task, TaskState state -> + if (task.name == 'japicmp' && state.failure != null) { + def sha = getGitShaFromJar("${buildDir}/snapshot/opensearch-${version}.jar") + logger.info("Incompatible java api from snapshot jar built off of commit ${sha}") + + if (!inHistory(sha)) { + logger.warn('\u001B[33mPlease merge from the target branch and run this task again.\u001B[0m') + } + } +} + +/** Downloads latest snapshot from maven repository */ +tasks.register("downloadSnapshot", Copy) { + def mavenSnapshotRepoUrl = "https://aws.oss.sonatype.org/content/repositories/snapshots/" + def groupId = "org.opensearch" + def artifactId = "opensearch" + + repositories { + maven { + url mavenSnapshotRepoUrl + } + } + + configurations { + snapshotArtifact + } + + dependencies { + snapshotArtifact("${groupId}:${artifactId}:${version}:") + } + + from configurations.snapshotArtifact + into "$buildDir/snapshot" +} + +/** Check if the sha is in the current history */ +def inHistory(String sha) { + try { + def commandCheckSha = "git merge-base --is-ancestor ${sha} HEAD" + commandCheckSha.execute() + return true + } catch (Exception e) { + return false + } +} + +/** Extracts the Git SHA used to build a jar from its manifest */ +def getGitShaFromJar(String jarPath) { + def sha = '' + try { + // Open the JAR file + def jarFile = new java.util.jar.JarFile(jarPath) + // Get the manifest from the JAR file + def manifest = jarFile.manifest + def attributes = manifest.mainAttributes + // The Git SHA is stored under the manifest attribute named 'Change' + sha = attributes.getValue('Change') + jarFile.close() + } catch (IOException e) { + println "Failed to read the JAR file: $e.message" + } + return sha +} From 3491bcb23d6b398117cfd11c5d273b2e83798d0b Mon Sep 17 00:00:00 2001 From: Arpit-Bandejiya Date: Tue, 2 Apr 2024 11:50:54 +0530 Subject: [PATCH 30/56] Add cluster primary balance constraint for rebalancing with buffer (#12656) Signed-off-by: Arpit-Bandejiya --- CHANGELOG.md | 1 + .../SegmentReplicationAllocationIT.java | 87 ++++++++- .../allocation/AllocationConstraints.java | 6 +- .../routing/allocation/ConstraintTypes.java | 15 +- .../allocation/RebalanceConstraints.java | 10 +-
.../allocation/RebalanceParameter.java | 24 +++ .../allocator/BalancedShardsAllocator.java | 81 ++++++++- .../allocator/LocalShardsBalancer.java | 14 +- .../common/settings/ClusterSettings.java | 2 + .../allocation/BalanceConfigurationTests.java | 166 ++++++++++++++++-- 10 files changed, 370 insertions(+), 36 deletions(-) create mode 100644 server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java diff --git a/CHANGELOG.md b/CHANGELOG.md index d2b41e910762b..5371a8372c56d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) +- Add cluster primary balance constraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) ### Dependencies - Bump `org.apache.commons:commons-configuration2` from 2.10.0 to 2.10.1 ([#12896](https://github.com/opensearch-project/OpenSearch/pull/12896)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index 30edea6551067..669e24f9fb555 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -31,6 +31,9 @@ import java.util.stream.Collectors; import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE; +import static org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -58,6 +61,20 @@ public void enablePreferPrimaryBalance() { ); } + public void setAllocationRelocationStrategy(boolean preferPrimaryBalance, boolean preferPrimaryRebalance, float buffer) { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(PREFER_PRIMARY_SHARD_BALANCE.getKey(), preferPrimaryBalance) + .put(PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance) + .put(PRIMARY_SHARD_REBALANCE_BUFFER.getKey(), buffer) + ) + ); + } + /** * This test verifies that the overall primary balance is attained during allocation. This test verifies primary * balance per index and across all indices is maintained.
@@ -87,7 +104,7 @@ public void testGlobalPrimaryAllocation() throws Exception { state = client().admin().cluster().prepareState().execute().actionGet().getState(); logger.info(ShardAllocations.printShardDistribution(state)); verifyPerIndexPrimaryBalance(); - verifyPrimaryBalance(); + verifyPrimaryBalance(0.0f); } /** @@ -224,6 +241,70 @@ public void testAllocationWithDisruption() throws Exception { verifyPerIndexPrimaryBalance(); } + /** + * Similar to the testSingleIndexShardAllocation test, but creates multiple indices while multiple nodes are added and + * removed. After each such event, the test asserts that primary shard distribution is balanced for each index as well as across the nodes + * when PREFER_PRIMARY_SHARD_REBALANCE is set to true + */ + public void testAllocationAndRebalanceWithDisruption() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final int maxReplicaCount = 2; + final int maxShardCount = 2; + // Create higher number of nodes than number of shards to reduce chances of SameShardAllocationDecider kicking-in + // and preventing primary relocations + final int nodeCount = randomIntBetween(5, 10); + final int numberOfIndices = randomIntBetween(1, 10); + final float buffer = randomIntBetween(1, 4) * 0.10f; + + logger.info("--> Creating {} nodes", nodeCount); + final List<String> nodeNames = new ArrayList<>(); + for (int i = 0; i < nodeCount; i++) { + nodeNames.add(internalCluster().startNode()); + } + setAllocationRelocationStrategy(true, true, buffer); + + int shardCount, replicaCount; + ClusterState state; + for (int i = 0; i < numberOfIndices; i++) { + shardCount = randomIntBetween(1, maxShardCount); + replicaCount = randomIntBetween(1, maxReplicaCount); + logger.info("--> Creating index test{} with primary {} and replica {}", i, shardCount, replicaCount); + createIndex("test" + i, shardCount, replicaCount, i % 2 == 0); + ensureGreen(TimeValue.timeValueSeconds(60)); + if (logger.isTraceEnabled()) { + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + } + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + final int additionalNodeCount = randomIntBetween(1, 5); + logger.info("--> Adding {} nodes", additionalNodeCount); + + internalCluster().startNodes(additionalNodeCount); + ensureGreen(TimeValue.timeValueSeconds(60)); + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + + int nodeCountToStop = additionalNodeCount; + while (nodeCountToStop > 0) { + internalCluster().stopRandomDataNode(); + // give replica a chance to promote as primary before terminating node containing the replica + ensureGreen(TimeValue.timeValueSeconds(60)); + nodeCountToStop--; + } + state = client().admin().cluster().prepareState().execute().actionGet().getState(); + logger.info("--> Cluster state post nodes stop {}", state); + logger.info(ShardAllocations.printShardDistribution(state)); + verifyPerIndexPrimaryBalance(); + verifyPrimaryBalance(buffer); + } + /** * Utility method which ensures cluster has balanced primary shard distribution across a single index.
* @throws Exception exception @@ -263,7 +344,7 @@ private void verifyPerIndexPrimaryBalance() throws Exception { }, 60, TimeUnit.SECONDS); } - private void verifyPrimaryBalance() throws Exception { + private void verifyPrimaryBalance(float buffer) throws Exception { assertBusy(() -> { final ClusterState currentState = client().admin().cluster().prepareState().execute().actionGet().getState(); RoutingNodes nodes = currentState.getRoutingNodes(); @@ -278,7 +359,7 @@ private void verifyPrimaryBalance() throws Exception { .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - assertTrue(primaryCount <= avgPrimaryShardsPerNode); + assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer))); } }, 60, TimeUnit.SECONDS); } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java index 5375910c57579..6702db4b43e91 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationConstraints.java @@ -30,9 +30,9 @@ public class AllocationConstraints { public AllocationConstraints() { this.constraints = new HashMap<>(); - this.constraints.putIfAbsent(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); - this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); - this.constraints.putIfAbsent(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached())); + this.constraints.put(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, new Constraint(isIndexShardsPerNodeBreached())); + this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put(CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPrimaryShardsPerNodeBreached(0.0f))); } public void updateAllocationConstraint(String constraint, boolean enable) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java index ae2d4a0926194..08fe8f92d1f80 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/ConstraintTypes.java @@ -28,6 +28,11 @@ public class ConstraintTypes { */ public final static String CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID = "cluster.primary.shard.balance.constraint"; + /** + * Defines a cluster constraint which is breached when a node contains more than avg primary shards across all indices + */ + public final static String CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID = "cluster.primary.shard.rebalance.constraint"; + /** * Defines an index constraint which is breached when a node contains more than avg number of shards for an index */ @@ -70,14 +75,14 @@ public static Predicate isPerIndexPrimaryShardsPerN } /** - * Defines a predicate which returns true when a node contains more than average number of primary shards. This - * constraint is used in weight calculation during allocation only. 
When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} - * is assigned to node resulting in lesser chances of node being selected as allocation target + * Defines a predicate which returns true when a node contains more than the average number of primary shards plus the configured buffer. This + * constraint is used in weight calculation during both allocation and rebalancing. When breached a high weight {@link ConstraintTypes#CONSTRAINT_WEIGHT} + * is assigned to the node, resulting in lesser chances of the node being selected as an allocation/rebalance target */ - public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached() { + public static Predicate<Constraint.ConstraintParams> isPrimaryShardsPerNodeBreached(float buffer) { return (params) -> { int primaryShardCount = params.getNode().numPrimaryShards(); - int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode()); + int allowedPrimaryShardCount = (int) Math.ceil(params.getBalancer().avgPrimaryShardsPerNode() * (1 + buffer)); return primaryShardCount >= allowedPrimaryShardCount; }; } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java index a4036ec47ec0e..2c2138af18abc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceConstraints.java @@ -14,8 +14,10 @@ import java.util.HashMap; import java.util.Map; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPerIndexPrimaryShardsPerNodeBreached; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.isPrimaryShardsPerNodeBreached; /** * Constraints applied during rebalancing round; specify conditions which, if breached, reduce the @@ -27,9 +29,13 @@ public class RebalanceConstraints { private Map<String, Constraint> constraints; - public RebalanceConstraints() { + public RebalanceConstraints(RebalanceParameter rebalanceParameter) { this.constraints = new HashMap<>(); - this.constraints.putIfAbsent(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, new Constraint(isPerIndexPrimaryShardsPerNodeBreached())); + this.constraints.put( + CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, + new Constraint(isPrimaryShardsPerNodeBreached(rebalanceParameter.getPreferPrimaryBalanceBuffer())) + ); } public void updateRebalanceConstraint(String constraint, boolean enable) { diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java new file mode 100644 index 0000000000000..35fbaede93ba3 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/RebalanceParameter.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cluster.routing.allocation; + +/** + * Parameters used by the rebalance constraints + */ +public class RebalanceParameter { + private float preferPrimaryBalanceBuffer; + + public RebalanceParameter(float preferPrimaryBalanceBuffer) { + this.preferPrimaryBalanceBuffer = preferPrimaryBalanceBuffer; + } + + public float getPreferPrimaryBalanceBuffer() { + return preferPrimaryBalanceBuffer; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 41ace0e7661fe..b2443490dd973 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.routing.allocation.ConstraintTypes; import org.opensearch.cluster.routing.allocation.MoveDecision; import org.opensearch.cluster.routing.allocation.RebalanceConstraints; +import org.opensearch.cluster.routing.allocation.RebalanceParameter; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.ShardAllocationDecision; import org.opensearch.common.inject.Inject; @@ -61,6 +62,7 @@ import java.util.Set; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; +import static org.opensearch.cluster.routing.allocation.ConstraintTypes.CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID; import static org.opensearch.cluster.routing.allocation.ConstraintTypes.INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID; @@ -145,10 +147,29 @@ public class BalancedShardsAllocator implements ShardsAllocator { Property.NodeScope ); + public static final Setting<Boolean> PREFER_PRIMARY_SHARD_REBALANCE = Setting.boolSetting( + "cluster.routing.allocation.rebalance.primary.enable", + false, + Property.Dynamic, + Property.NodeScope + ); + + public static final Setting<Float> PRIMARY_SHARD_REBALANCE_BUFFER = Setting.floatSetting( + "cluster.routing.allocation.rebalance.primary.buffer", + 0.10f, + 0.0f, + Property.Dynamic, + Property.NodeScope + ); + private volatile boolean movePrimaryFirst; private volatile ShardMovementStrategy shardMovementStrategy; private volatile boolean preferPrimaryShardBalance; + private volatile boolean preferPrimaryShardRebalance; + private volatile float preferPrimaryShardRebalanceBuffer; + private volatile float indexBalanceFactor; + private volatile float shardBalanceFactor; private volatile WeightFunction weightFunction; private volatile float threshold; @@ -158,14 +179,21 @@ public BalancedShardsAllocator(Settings settings) { @Inject public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { - setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setShardBalanceFactor(SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setIndexBalanceFactor(INDEX_BALANCE_FACTOR_SETTING.get(settings)); + setPreferPrimaryShardRebalanceBuffer(PRIMARY_SHARD_REBALANCE_BUFFER.get(settings)); + updateWeightFunction(); setThreshold(THRESHOLD_SETTING.get(settings)); setPreferPrimaryShardBalance(PREFER_PRIMARY_SHARD_BALANCE.get(settings)); + setPreferPrimaryShardRebalance(PREFER_PRIMARY_SHARD_REBALANCE.get(settings));
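+        // Editorial worked example, not part of the original patch (numbers are
+        // hypothetical): with cluster.routing.allocation.rebalance.primary.buffer
+        // = 0.10 and an average of 5 primaries per node, isPrimaryShardsPerNodeBreached
+        // treats a node as breached once it reaches ceil(5 * (1 + 0.10)) = 6
+        // primaries, so primary rebalancing stops selecting that node as a target.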
setShardMovementStrategy(SHARD_MOVEMENT_STRATEGY_SETTING.get(settings)); clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_BALANCE, this::setPreferPrimaryShardBalance); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVE_PRIMARY_FIRST_SETTING, this::setMovePrimaryFirst); clusterSettings.addSettingsUpdateConsumer(SHARD_MOVEMENT_STRATEGY_SETTING, this::setShardMovementStrategy); - clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, this::updateIndexBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(SHARD_BALANCE_FACTOR_SETTING, this::updateShardBalanceFactor); + clusterSettings.addSettingsUpdateConsumer(PRIMARY_SHARD_REBALANCE_BUFFER, this::updatePreferPrimaryShardBalanceBuffer); + clusterSettings.addSettingsUpdateConsumer(PREFER_PRIMARY_SHARD_REBALANCE, this::setPreferPrimaryShardRebalance); clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); } @@ -190,8 +218,35 @@ private void setShardMovementStrategy(ShardMovementStrategy shardMovementStrateg } } - private void setWeightFunction(float indexBalance, float shardBalanceFactor) { - weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); + private void setIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + } + + private void setShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + } + + private void setPreferPrimaryShardRebalanceBuffer(float preferPrimaryShardRebalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardRebalanceBuffer; + } + + private void updateIndexBalanceFactor(float indexBalanceFactor) { + this.indexBalanceFactor = indexBalanceFactor; + updateWeightFunction(); + } + + private void updateShardBalanceFactor(float shardBalanceFactor) { + this.shardBalanceFactor = shardBalanceFactor; + updateWeightFunction(); + } + + private void updatePreferPrimaryShardBalanceBuffer(float preferPrimaryShardBalanceBuffer) { + this.preferPrimaryShardRebalanceBuffer = preferPrimaryShardBalanceBuffer; + updateWeightFunction(); + } + + private void updateWeightFunction() { + weightFunction = new WeightFunction(this.indexBalanceFactor, this.shardBalanceFactor, this.preferPrimaryShardRebalanceBuffer); } /** @@ -205,6 +260,11 @@ private void setPreferPrimaryShardBalance(boolean preferPrimaryShardBalance) { this.weightFunction.updateRebalanceConstraint(INDEX_PRIMARY_SHARD_BALANCE_CONSTRAINT_ID, preferPrimaryShardBalance); } + private void setPreferPrimaryShardRebalance(boolean preferPrimaryShardRebalance) { + this.preferPrimaryShardRebalance = preferPrimaryShardRebalance; + this.weightFunction.updateRebalanceConstraint(CLUSTER_PRIMARY_SHARD_REBALANCE_CONSTRAINT_ID, preferPrimaryShardRebalance); + } + private void setThreshold(float threshold) { this.threshold = threshold; } @@ -221,7 +281,8 @@ public void allocate(RoutingAllocation allocation) { shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); localShardsBalancer.allocateUnassigned(); localShardsBalancer.moveShards(); @@ -242,7 +303,8 @@ public ShardAllocationDecision decideShardAllocation(final ShardRouting shard, f shardMovementStrategy, weightFunction, threshold, - preferPrimaryShardBalance + preferPrimaryShardBalance, + preferPrimaryShardRebalance ); AllocateUnassignedDecision 
allocateUnassignedDecision = AllocateUnassignedDecision.NOT_TAKEN; MoveDecision moveDecision = MoveDecision.NOT_TAKEN; @@ -348,7 +410,7 @@ static class WeightFunction { private AllocationConstraints constraints; private RebalanceConstraints rebalanceConstraints; - WeightFunction(float indexBalance, float shardBalance) { + WeightFunction(float indexBalance, float shardBalance, float preferPrimaryBalanceBuffer) { float sum = indexBalance + shardBalance; if (sum <= 0.0f) { throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); @@ -357,8 +419,9 @@ static class WeightFunction { theta1 = indexBalance / sum; this.indexBalance = indexBalance; this.shardBalance = shardBalance; + RebalanceParameter rebalanceParameter = new RebalanceParameter(preferPrimaryBalanceBuffer); this.constraints = new AllocationConstraints(); - this.rebalanceConstraints = new RebalanceConstraints(); + this.rebalanceConstraints = new RebalanceConstraints(rebalanceParameter); // Enable index shard per node breach constraint updateAllocationConstraint(INDEX_SHARD_PER_NODE_BREACH_CONSTRAINT_ID, true); } @@ -495,7 +558,7 @@ public Balancer( float threshold, boolean preferPrimaryBalance ) { - super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance); + super(logger, allocation, shardMovementStrategy, weight, threshold, preferPrimaryBalance, false); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 45f64a5b29b04..ec25d041bda43 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -61,6 +61,7 @@ public class LocalShardsBalancer extends ShardsBalancer { private final ShardMovementStrategy shardMovementStrategy; private final boolean preferPrimaryBalance; + private final boolean preferPrimaryRebalance; private final BalancedShardsAllocator.WeightFunction weight; private final float threshold; @@ -76,7 +77,8 @@ public LocalShardsBalancer( ShardMovementStrategy shardMovementStrategy, BalancedShardsAllocator.WeightFunction weight, float threshold, - boolean preferPrimaryBalance + boolean preferPrimaryBalance, + boolean preferPrimaryRebalance ) { this.logger = logger; this.allocation = allocation; @@ -91,6 +93,7 @@ public LocalShardsBalancer( sorter = newNodeSorter(); inEligibleTargetNode = new HashSet<>(); this.preferPrimaryBalance = preferPrimaryBalance; + this.preferPrimaryRebalance = preferPrimaryRebalance; this.shardMovementStrategy = shardMovementStrategy; } @@ -995,13 +998,18 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala continue; } // This is a safety net which prevents un-necessary primary shard relocations from maxNode to minNode when - // doing such relocation wouldn't help in primary balance. + // doing such relocation wouldn't help in primary balance. 
The condition won't be applicable when we enable node level + // primary rebalance if (preferPrimaryBalance == true + && preferPrimaryRebalance == false && shard.primary() && maxNode.numPrimaryShards(shard.getIndexName()) - minNode.numPrimaryShards(shard.getIndexName()) < 2) { continue; } - + // Relax the above condition to per node to allow rebalancing to attain global balance + if (preferPrimaryRebalance == true && shard.primary() && maxNode.numPrimaryShards() - minNode.numPrimaryShards() < 2) { + continue; + } final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); maxNode.removeShard(shard); long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 8760ee3c94309..1b529a3b2bef1 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -252,7 +252,9 @@ public void apply(Settings value, Settings current, Settings previous) { AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING, BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.PRIMARY_SHARD_REBALANCE_BUFFER, BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE, + BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE, BalancedShardsAllocator.SHARD_MOVE_PRIMARY_FIRST_SETTING, BalancedShardsAllocator.SHARD_MOVEMENT_STRATEGY_SETTING, BalancedShardsAllocator.THRESHOLD_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java index 62dce9c4edeb5..11cbe89645657 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -72,6 +72,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -140,10 +141,14 @@ public void testIndexBalance() { } private Settings.Builder getSettingsBuilderForPrimaryBalance() { - return getSettingsBuilderForPrimaryBalance(true); + return getSettingsBuilderForPrimaryBalance(true, false); } - private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance) { + private Settings.Builder getSettingsBuilderForPrimaryReBalance() { + return getSettingsBuilderForPrimaryBalance(true, true); + } + + private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrimaryBalance, boolean preferPrimaryRebalance) { final float indexBalance = 0.55f; final float shardBalance = 0.45f; final float balanceThreshold = 1.0f; @@ -155,6 +160,7 @@ private Settings.Builder getSettingsBuilderForPrimaryBalance(boolean preferPrima ); settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance); settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_BALANCE.getKey(), 
preferPrimaryBalance);
+        settings.put(BalancedShardsAllocator.PREFER_PRIMARY_SHARD_REBALANCE.getKey(), preferPrimaryRebalance);
         settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance);
         settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold);
         return settings;
@@ -201,7 +207,7 @@ public void testPrimaryBalanceWithoutPreferPrimaryBalanceSetting() {
         int balanceFailed = 0;
 
         AllocationService strategy = createAllocationService(
-            getSettingsBuilderForPrimaryBalance(false).build(),
+            getSettingsBuilderForPrimaryBalance(false, false).build(),
             new TestGatewayAllocator()
         );
         for (int i = 0; i < numberOfRuns; i++) {
@@ -244,6 +250,60 @@ public void testPrimaryBalanceWithPreferPrimaryBalanceSetting() {
         assertTrue(balanceFailed <= 1);
     }
 
+    /**
+     * This test verifies that primary shard balance is not attained after a node drop when only the per-index
+     * PREFER_PRIMARY_SHARD_BALANCE setting is enabled.
+     */
+    public void testPrimaryBalanceNotSolvedForNodeDropWithPreferPrimaryBalanceSetting() {
+        final int numberOfNodes = 4;
+        final int numberOfIndices = 4;
+        final int numberOfShards = 4;
+        final int numberOfReplicas = 1;
+        final int numberOfRuns = 5;
+        final float buffer = 0.10f;
+        int balanceFailed = 0;
+
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator());
+        for (int i = 0; i < numberOfRuns; i++) {
+            ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas);
+            clusterState = removeOneNode(clusterState, strategy);
+            logger.info(ShardAllocations.printShardDistribution(clusterState));
+            try {
+                verifyPrimaryBalance(clusterState, buffer);
+            } catch (AssertionError | Exception e) {
+                balanceFailed++;
+                logger.info("Expected assertion failure");
+            }
+        }
+        assertTrue(balanceFailed >= 4);
+    }
+
+    /**
+     * This test verifies that primary shard balance is attained after a node drop with the PREFER_PRIMARY_SHARD_REBALANCE setting.
+     */
+    public void testPrimaryBalanceSolvedWithPreferPrimaryRebalanceSetting() {
+        final int numberOfNodes = 4;
+        final int numberOfIndices = 4;
+        final int numberOfShards = 4;
+        final int numberOfReplicas = 1;
+        final int numberOfRuns = 5;
+        final float buffer = 0.10f;
+        int balanceFailed = 0;
+
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator());
+        for (int i = 0; i < numberOfRuns; i++) {
+            ClusterState clusterState = initCluster(strategy, numberOfIndices, numberOfNodes, numberOfShards, numberOfReplicas);
+            clusterState = removeOneNode(clusterState, strategy);
+            logger.info(ShardAllocations.printShardDistribution(clusterState));
+            try {
+                verifyPrimaryBalance(clusterState, buffer);
+            } catch (AssertionError | Exception e) {
+                balanceFailed++;
+                logger.info("Unexpected assertion failure");
+            }
+        }
+        assertTrue(balanceFailed <= 1);
+    }
+
     /**
      * This test verifies the allocation logic when nodes breach multiple constraints and ensure node breaching min
      * constraints chosen for allocation. 
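
The buffer used by the two tests above bounds per-node primary skew: with N active primaries spread across M nodes, each node may hold at most ceil(N / M) * (1 + buffer) primaries, which is the invariant the reworked verifyPrimaryBalance(ClusterState, float) asserts further below. A minimal standalone sketch of that check, with a hypothetical class and method name that are not part of this patch:

    // Standalone sketch of the per-node primary ceiling asserted by
    // verifyPrimaryBalance(ClusterState, float) via assertBusy.
    public final class PrimaryBalanceCheck {

        static boolean isPrimaryBalanced(int[] primariesPerNode, float buffer) {
            int total = 0;
            for (int count : primariesPerNode) {
                total += count;
            }
            // Average rounded up, mirroring avgPrimaryShardsPerNode in the test helper.
            int avg = (int) Math.ceil(total * 1f / primariesPerNode.length);
            for (int count : primariesPerNode) {
                // Same bound as assertTrue(primaryCount <= avgPrimaryShardsPerNode * (1 + buffer)).
                if (count > avg * (1 + buffer)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // 16 primaries on 3 nodes: ceil(16 / 3) = 6, so a 0.10f buffer allows up to 6.6 per node.
            System.out.println(isPrimaryBalanced(new int[] { 6, 5, 5 }, 0.10f)); // true
            System.out.println(isPrimaryBalanced(new int[] { 8, 4, 4 }, 0.10f)); // false
        }
    }
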
@@ -367,8 +427,7 @@ public void testPrimaryBalanceWithContrainstBreaching() {
      */
     public void testGlobalPrimaryBalance() throws Exception {
         AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryBalance().build(), new TestGatewayAllocator());
-        ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
-            .build();
+        ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
         clusterState = addNode(clusterState, strategy);
         clusterState = addNode(clusterState, strategy);
@@ -377,7 +436,30 @@ public void testGlobalPrimaryBalance() throws Exception {
         clusterState = addIndex(clusterState, strategy, "test-index3", 1, 1);
 
         logger.info(ShardAllocations.printShardDistribution(clusterState));
-        verifyPrimaryBalance(clusterState);
+        verifyPrimaryBalance(clusterState, 0.0f);
+    }
+
+    /**
+     * This test verifies global primary balance by creating indices iteratively and verifying that primary shards do not pile up on one node.
+     * @throws Exception generic exception
+     */
+    public void testGlobalPrimaryBalanceWithNodeDrops() throws Exception {
+        final float buffer = 0.10f;
+        AllocationService strategy = createAllocationService(getSettingsBuilderForPrimaryReBalance().build(), new TestGatewayAllocator());
+        ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).build();
+        clusterState = addNodes(clusterState, strategy, 5);
+
+        clusterState = addIndices(clusterState, strategy, 5, 1, 8);
+
+        logger.info(ShardAllocations.printShardDistribution(clusterState));
+        verifyPrimaryBalance(clusterState, buffer);
+
+        clusterState = removeOneNode(clusterState, strategy);
+
+        clusterState = applyAllocationUntilNoChange(clusterState, strategy);
+
+        logger.info(ShardAllocations.printShardDistribution(clusterState));
+        verifyPrimaryBalance(clusterState, buffer);
     }
 
     /**
@@ -537,7 +619,7 @@ private void verifyPerIndexPrimaryBalance(ClusterState currentState) {
         }
     }
 
-    private void verifyPrimaryBalance(ClusterState clusterState) throws Exception {
+    private void verifySkewedPrimaryBalance(ClusterState clusterState, int delta) throws Exception {
         assertBusy(() -> {
             RoutingNodes nodes = clusterState.getRoutingNodes();
             int totalPrimaryShards = 0;
@@ -545,13 +627,36 @@ private void verifyPrimaryBalance(ClusterState clusterState) throws Exception {
                 totalPrimaryShards += index.primaryShardsActive();
             }
             final int avgPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size());
+            int maxPrimaryShardOnNode = Integer.MIN_VALUE;
+            int minPrimaryShardOnNode = Integer.MAX_VALUE;
             for (RoutingNode node : nodes) {
                 final int primaryCount = node.shardsWithState(STARTED)
                     .stream()
                     .filter(ShardRouting::primary)
                     .collect(Collectors.toList())
                     .size();
-                assertTrue(primaryCount <= avgPrimaryShardsPerNode);
+                maxPrimaryShardOnNode = Math.max(maxPrimaryShardOnNode, primaryCount);
+                minPrimaryShardOnNode = Math.min(minPrimaryShardOnNode, primaryCount);
+            }
+            assertTrue(maxPrimaryShardOnNode - minPrimaryShardOnNode < delta);
+        }, 60, TimeUnit.SECONDS);
+    }
+
+    private void verifyPrimaryBalance(ClusterState clusterState, float buffer) throws Exception {
+        assertBusy(() -> {
+            RoutingNodes nodes = clusterState.getRoutingNodes();
+            int totalPrimaryShards = 0;
+            for (final IndexRoutingTable index : clusterState.getRoutingTable().indicesRouting().values()) {
+                totalPrimaryShards += index.primaryShardsActive();
+            }
+            final int avgPrimaryShardsPerNode = (int) 
Math.ceil(totalPrimaryShards * 1f / clusterState.getRoutingNodes().size());
+            for (RoutingNode node : nodes) {
+                final int primaryCount = node.shardsWithState(STARTED)
+                    .stream()
+                    .filter(ShardRouting::primary)
+                    .collect(Collectors.toList())
+                    .size();
+                assertTrue(primaryCount <= (avgPrimaryShardsPerNode * (1 + buffer)));
             }
         }, 60, TimeUnit.SECONDS);
     }
@@ -567,8 +672,8 @@ public void testShardBalance() {
             ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
             ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()
         );
-        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance);
         settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), shardBalance);
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance);
         settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceThreshold);
 
         AllocationService strategy = createAllocationService(settings.build(), new TestGatewayAllocator());
@@ -634,6 +739,34 @@ private ClusterState addIndex(
         return applyAllocationUntilNoChange(clusterState, strategy);
     }
 
+    private ClusterState addIndices(
+        ClusterState clusterState,
+        AllocationService strategy,
+        int numberOfShards,
+        int numberOfReplicas,
+        int numberOfIndices
+    ) {
+        Metadata.Builder metadataBuilder = Metadata.builder(clusterState.getMetadata());
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable());
+
+        for (int i = 0; i < numberOfIndices; i++) {
+            IndexMetadata.Builder index = IndexMetadata.builder("test" + i)
+                .settings(settings(Version.CURRENT))
+                .numberOfShards(numberOfShards)
+                .numberOfReplicas(numberOfReplicas);
+
+            metadataBuilder = metadataBuilder.put(index);
+            routingTableBuilder.addAsNew(index.build());
+        }
+
+        clusterState = ClusterState.builder(clusterState)
+            .metadata(metadataBuilder.build())
+            .routingTable(routingTableBuilder.build())
+            .build();
+        clusterState = strategy.reroute(clusterState, "indices-created");
+        return applyAllocationUntilNoChange(clusterState, strategy);
+    }
+
     private ClusterState initCluster(
         AllocationService strategy,
         int numberOfIndices,
@@ -664,7 +797,7 @@ private ClusterState initCluster(
         for (int i = 0; i < numberOfNodes; i++) {
             nodes.add(newNode("node" + i));
         }
-        ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+        ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .nodes(nodes)
             .metadata(metadata)
             .routingTable(initialRoutingTable)
@@ -673,6 +806,17 @@ private ClusterState initCluster(
         return applyAllocationUntilNoChange(clusterState, strategy);
     }
 
+    private ClusterState addNodes(ClusterState clusterState, AllocationService strategy, int numberOfNodes) {
+        logger.info("now, start [{}] more nodes, check that rebalancing will happen because we set it to always", numberOfNodes);
+        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+        for (int i = 0; i < numberOfNodes; i++) {
+            nodes.add(newNode("node" + (clusterState.nodes().getSize() + i)));
+        }
+        clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
+        clusterState = strategy.reroute(clusterState, "reroute");
+        return applyStartedShardsUntilNoChange(clusterState, strategy);
+    }
+
     private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
         logger.info("now, start 1 more node, check that rebalancing will happen 
because we set it to always"); clusterState = ClusterState.builder(clusterState) @@ -918,7 +1062,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing nodes.add(node); } - ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + ClusterState clusterState = ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(nodes) .metadata(metadata) .routingTable(routingTable) From 170ea7af0ac02057dae18b31e81ae149b001532c Mon Sep 17 00:00:00 2001 From: Ashish Date: Tue, 2 Apr 2024 16:17:07 +0530 Subject: [PATCH 31/56] Introduce remote store hash algo in customData in IndexMetadata (#12986) Signed-off-by: Ashish Singh --- .../remotestore/RemoteRestoreSnapshotIT.java | 22 +-- .../metadata/MetadataCreateIndexService.java | 34 ++-- .../org/opensearch/common/hash/FNV1a.java | 48 +++++ .../org/opensearch/index/IndexService.java | 2 +- .../org/opensearch/index/IndexSettings.java | 17 +- .../index/remote/RemoteStoreDataEnums.java | 69 ------- .../index/remote/RemoteStoreEnums.java | 187 ++++++++++++++++++ .../index/remote/RemoteStorePathStrategy.java | 152 ++++++++++++++ ...a => RemoteStorePathStrategyResolver.java} | 16 +- .../index/remote/RemoteStorePathType.java | 75 ------- .../opensearch/index/shard/IndexShard.java | 4 +- .../opensearch/index/shard/StoreRecovery.java | 6 +- .../store/RemoteSegmentStoreDirectory.java | 6 +- .../RemoteSegmentStoreDirectoryFactory.java | 34 +++- .../RemoteStoreLockManagerFactory.java | 27 ++- .../index/translog/RemoteFsTranslog.java | 40 ++-- .../opensearch/indices/IndicesService.java | 8 +- .../blobstore/BlobStoreRepository.java | 16 +- .../opensearch/snapshots/RestoreService.java | 2 +- .../MetadataCreateIndexServiceTests.java | 27 ++- .../opensearch/common/hashing/FNV1aTests.java | 48 +++++ ...eTests.java => RemoteStoreEnumsTests.java} | 89 ++++++--- .../RemoteSegmentStoreDirectoryTests.java | 13 +- .../RemoteStoreLockManagerFactoryTests.java | 7 +- .../index/translog/RemoteFsTranslogTests.java | 2 +- .../TranslogTransferManagerTests.java | 4 +- 26 files changed, 685 insertions(+), 270 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/hash/FNV1a.java delete mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java create mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java rename server/src/main/java/org/opensearch/index/remote/{RemoteStorePathTypeResolver.java => RemoteStorePathStrategyResolver.java} (54%) delete mode 100644 server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java create mode 100644 server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java rename server/src/test/java/org/opensearch/index/remote/{RemoteStorePathTypeTests.java => RemoteStoreEnumsTests.java} (51%) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index fff99e65054dc..181f242aecd09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -27,7 +27,8 @@ import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexService; import 
org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.common.ReplicationType; @@ -283,7 +284,7 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { indexDocuments(client, indexName1, randomIntBetween(5, 10)); ensureGreen(indexName1); - validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); logger.info("--> snapshot"); SnapshotInfo snapshotInfo = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); @@ -300,14 +301,12 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version1); - validateRemoteStorePathType(restoredIndexName1version1, RemoteStorePathType.FIXED); + validatePathType(restoredIndexName1version1, PathType.FIXED, PathHashAlgorithm.FNV_1A); client(clusterManagerNode).admin() .cluster() .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), RemoteStorePathType.HASHED_PREFIX) - ) + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), PathType.HASHED_PREFIX)) .get(); restoreSnapshotResponse = client.admin() @@ -319,24 +318,25 @@ public void testRemoteStoreCustomDataOnIndexCreationAndRestore() { .get(); assertEquals(RestStatus.ACCEPTED, restoreSnapshotResponse.status()); ensureGreen(restoredIndexName1version2); - validateRemoteStorePathType(restoredIndexName1version2, RemoteStorePathType.HASHED_PREFIX); + validatePathType(restoredIndexName1version2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Create index with cluster setting cluster.remote_store.index.path.prefix.type as hashed_prefix. indexSettings = getIndexSettings(1, 0).build(); createIndex(indexName2, indexSettings); ensureGreen(indexName2); - validateRemoteStorePathType(indexName2, RemoteStorePathType.HASHED_PREFIX); + validatePathType(indexName2, PathType.HASHED_PREFIX, PathHashAlgorithm.FNV_1A); // Validating that custom data has not changed for indexes which were created before the cluster setting got updated - validateRemoteStorePathType(indexName1, RemoteStorePathType.FIXED); + validatePathType(indexName1, PathType.FIXED, PathHashAlgorithm.FNV_1A); } - private void validateRemoteStorePathType(String index, RemoteStorePathType pathType) { + private void validatePathType(String index, PathType pathType, PathHashAlgorithm pathHashAlgorithm) { ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); // Validate that the remote_store custom data is present in index metadata for the created index. 
Map remoteCustomData = state.metadata().index(index).getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); assertNotNull(remoteCustomData); - assertEquals(pathType.toString(), remoteCustomData.get(RemoteStorePathType.NAME)); + assertEquals(pathType.name(), remoteCustomData.get(PathType.NAME)); + assertEquals(pathHashAlgorithm.name(), remoteCustomData.get(PathHashAlgorithm.NAME)); } public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index aee0473be95eb..451871b10d5eb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -88,8 +88,10 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.MapperService.MergeReason; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathType; -import org.opensearch.index.remote.RemoteStorePathTypeResolver; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; +import org.opensearch.index.remote.RemoteStorePathStrategyResolver; import org.opensearch.index.shard.IndexSettingProvider; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; @@ -171,7 +173,7 @@ public class MetadataCreateIndexService { private AwarenessReplicaBalance awarenessReplicaBalance; @Nullable - private final RemoteStorePathTypeResolver remoteStorePathTypeResolver; + private final RemoteStorePathStrategyResolver remoteStorePathStrategyResolver; public MetadataCreateIndexService( final Settings settings, @@ -204,8 +206,8 @@ public MetadataCreateIndexService( // Task is onboarded for throttling, it will get retried from associated TransportClusterManagerNodeAction. createIndexTaskKey = clusterService.registerClusterManagerTask(ClusterManagerTaskKeys.CREATE_INDEX_KEY, true); - remoteStorePathTypeResolver = isRemoteDataAttributePresent(settings) - ? new RemoteStorePathTypeResolver(clusterService.getClusterSettings()) + remoteStorePathStrategyResolver = isRemoteDataAttributePresent(settings) + ? new RemoteStorePathStrategyResolver(clusterService.getClusterSettings()) : null; } @@ -554,7 +556,7 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( tmpImdBuilder.setRoutingNumShards(routingNumShards); tmpImdBuilder.settings(indexSettings); tmpImdBuilder.system(isSystem); - addRemoteStorePathTypeInCustomData(tmpImdBuilder, true); + addRemoteStorePathStrategyInCustomData(tmpImdBuilder, true); // Set up everything, now locally create the index to see that things are ok, and apply IndexMetadata tempMetadata = tmpImdBuilder.build(); @@ -569,8 +571,8 @@ IndexMetadata buildAndValidateTemporaryIndexMetadata( * @param tmpImdBuilder index metadata builder. 
* @param assertNullOldType flag to verify that the old remote store path type is null
      */
-    public void addRemoteStorePathTypeInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
-        if (remoteStorePathTypeResolver != null) {
+    public void addRemoteStorePathStrategyInCustomData(IndexMetadata.Builder tmpImdBuilder, boolean assertNullOldType) {
+        if (remoteStorePathStrategyResolver != null) {
             // It is possible that remote custom data exists already. In such cases, we need to only update the path type
             // in the remote store custom data map.
             Map<String, String> existingRemoteCustomData = tmpImdBuilder.removeCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY);
@@ -578,10 +580,18 @@ public void addRemoteStorePathTypeInCustomData(IndexMetadata.Build
             ? new HashMap<>()
             : new HashMap<>(existingRemoteCustomData);
         // Determine the path type for use using the remoteStorePathResolver.
-        String newPathType = remoteStorePathTypeResolver.getType().toString();
-        String oldPathType = remoteCustomData.put(RemoteStorePathType.NAME, newPathType);
-        assert !assertNullOldType || Objects.isNull(oldPathType);
-        logger.trace(() -> new ParameterizedMessage("Added new path type {}, replaced old path type {}", newPathType, oldPathType));
+        RemoteStorePathStrategy newPathStrategy = remoteStorePathStrategyResolver.get();
+        String oldPathType = remoteCustomData.put(PathType.NAME, newPathStrategy.getType().name());
+        String oldHashAlgorithm = remoteCustomData.put(PathHashAlgorithm.NAME, newPathStrategy.getHashAlgorithm().name());
+        assert !assertNullOldType || (Objects.isNull(oldPathType) && Objects.isNull(oldHashAlgorithm));
+        logger.trace(
+            () -> new ParameterizedMessage(
+                "Added newPathStrategy={}, replaced oldPathType={} oldHashAlgorithm={}",
+                newPathStrategy,
+                oldPathType,
+                oldHashAlgorithm
+            )
+        );
         tmpImdBuilder.putCustom(IndexMetadata.REMOTE_STORE_CUSTOM_KEY, remoteCustomData);
     }
 }

diff --git a/server/src/main/java/org/opensearch/common/hash/FNV1a.java b/server/src/main/java/org/opensearch/common/hash/FNV1a.java
new file mode 100644
index 0000000000000..cab28d0f2d68f
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/hash/FNV1a.java
@@ -0,0 +1,48 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.hash;
+
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Provides the FNV-1a hash function, with Java ports of the reference implementations from the FNV authors' website:
+ * 32 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_32a.c + * 64 bit Java port of http://www.isthe.com/chongo/src/fnv/hash_64a.c + * + * @opensearch.internal + */ +public class FNV1a { + private static final long FNV_OFFSET_BASIS_32 = 0x811c9dc5L; + private static final long FNV_PRIME_32 = 0x01000193L; + + private static final long FNV_OFFSET_BASIS_64 = 0xcbf29ce484222325L; + private static final long FNV_PRIME_64 = 0x100000001b3L; + + // FNV-1a hash computation for 32-bit hash + public static long hash32(String input) { + long hash = FNV_OFFSET_BASIS_32; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_32; + } + return hash; + } + + // FNV-1a hash computation for 64-bit hash + public static long hash64(String input) { + long hash = FNV_OFFSET_BASIS_64; + byte[] bytes = input.getBytes(StandardCharsets.UTF_8); + for (byte b : bytes) { + hash ^= (b & 0xFF); + hash *= FNV_PRIME_64; + } + return hash; + } +} diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 03cf8f9182211..9ded1b174d4c6 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -509,7 +509,7 @@ public synchronized IndexShard createShard( RemoteStoreNodeAttribute.getRemoteStoreSegmentRepo(this.indexSettings.getNodeSettings()), this.indexSettings.getUUID(), shardId, - this.indexSettings.getRemoteStorePathType() + this.indexSettings.getRemoteStorePathStrategy() ); } remoteStore = new Store(shardId, this.indexSettings, remoteDirectory, lock, Store.OnClose.EMPTY, path); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 7e3d812974c79..f17a254c4d3cd 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -48,7 +48,9 @@ import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.ingest.IngestService; @@ -1908,10 +1910,15 @@ public void setDocIdFuzzySetFalsePositiveProbability(double docIdFuzzySetFalsePo this.docIdFuzzySetFalsePositiveProbability = docIdFuzzySetFalsePositiveProbability; } - public RemoteStorePathType getRemoteStorePathType() { + public RemoteStorePathStrategy getRemoteStorePathStrategy() { Map remoteCustomData = indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY); - return remoteCustomData != null && remoteCustomData.containsKey(RemoteStorePathType.NAME) - ? 
RemoteStorePathType.parseString(remoteCustomData.get(RemoteStorePathType.NAME)) - : RemoteStorePathType.FIXED; + if (remoteCustomData != null + && remoteCustomData.containsKey(PathType.NAME) + && remoteCustomData.containsKey(PathHashAlgorithm.NAME)) { + PathType pathType = PathType.parseString(remoteCustomData.get(PathType.NAME)); + PathHashAlgorithm pathHashAlgorithm = PathHashAlgorithm.parseString(remoteCustomData.get(PathHashAlgorithm.NAME)); + return new RemoteStorePathStrategy(pathType, pathHashAlgorithm); + } + return new RemoteStorePathStrategy(PathType.FIXED); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java deleted file mode 100644 index 475e29004ba2e..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreDataEnums.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.remote; - -import org.opensearch.common.annotation.PublicApi; - -import java.util.Set; - -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; - -/** - * This class contains the different enums related to remote store data categories and types. - * - * @opensearch.api - */ -public class RemoteStoreDataEnums { - - /** - * Categories of the data in Remote store. - */ - @PublicApi(since = "2.14.0") - public enum DataCategory { - SEGMENTS("segments", Set.of(DataType.values())), - TRANSLOG("translog", Set.of(DATA, METADATA)); - - private final String name; - private final Set supportedDataTypes; - - DataCategory(String name, Set supportedDataTypes) { - this.name = name; - this.supportedDataTypes = supportedDataTypes; - } - - public boolean isSupportedDataType(DataType dataType) { - return supportedDataTypes.contains(dataType); - } - - public String getName() { - return name; - } - } - - /** - * Types of data in remote store. - */ - @PublicApi(since = "2.14.0") - public enum DataType { - DATA("data"), - METADATA("metadata"), - LOCK_FILES("lock_files"); - - private final String name; - - DataType(String name) { - this.name = name; - } - - public String getName() { - return name; - } - } -} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java new file mode 100644 index 0000000000000..4e557d8c24431 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.remote;
+
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.blobstore.BlobPath;
+import org.opensearch.common.hash.FNV1a;
+import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput;
+
+import java.util.Locale;
+import java.util.Set;
+
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA;
+import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
+
+/**
+ * This class contains the different enums related to remote store, such as data categories and types, path types,
+ * and hashing algorithms.
+ *
+ * @opensearch.api
+ */
+public class RemoteStoreEnums {
+
+    /**
+     * Categories of the data in Remote store.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum DataCategory {
+        SEGMENTS("segments", Set.of(DataType.values())),
+        TRANSLOG("translog", Set.of(DATA, METADATA));
+
+        private final String name;
+        private final Set<DataType> supportedDataTypes;
+
+        DataCategory(String name, Set<DataType> supportedDataTypes) {
+            this.name = name;
+            this.supportedDataTypes = supportedDataTypes;
+        }
+
+        public boolean isSupportedDataType(DataType dataType) {
+            return supportedDataTypes.contains(dataType);
+        }
+
+        public String getName() {
+            return name;
+        }
+    }
+
+    /**
+     * Types of data in remote store.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum DataType {
+        DATA("data"),
+        METADATA("metadata"),
+        LOCK_FILES("lock_files");
+
+        private final String name;
+
+        DataType(String name) {
+            this.name = name;
+        }
+
+        public String getName() {
+            return name;
+        }
+    }
+
+    /**
+     * Enumerates the types of remote store path resolution techniques supported by OpenSearch.
+     * For more information, see Github issue #12567.
+     */
+    @PublicApi(since = "2.14.0")
+    public enum PathType {
+        FIXED {
+            @Override
+            public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+                // Hash algorithm is not used in FIXED path type
+                return pathInput.basePath()
+                    .add(pathInput.indexUUID())
+                    .add(pathInput.shardId())
+                    .add(pathInput.dataCategory().getName())
+                    .add(pathInput.dataType().getName());
+            }
+
+            @Override
+            boolean requiresHashAlgorithm() {
+                return false;
+            }
+        },
+        HASHED_PREFIX {
+            @Override
+            public BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm) {
+                // TODO - This still needs to be implemented; for now it keeps the same path as FIXED so that multiple tests do not fail.
+                // throw new UnsupportedOperationException("Not implemented"); --> not thrown yet, to keep a couple of tests unblocked.
+                return pathInput.basePath()
+                    .add(pathInput.indexUUID())
+                    .add(pathInput.shardId())
+                    .add(pathInput.dataCategory().getName())
+                    .add(pathInput.dataType().getName());
+            }
+
+            @Override
+            boolean requiresHashAlgorithm() {
+                return true;
+            }
+        };
+
+        /**
+         * This method generates the path for the given path input, which encapsulates multiple fields and characteristics
+         * of the data.
+         *
+         * @param pathInput input.
+         * @param hashAlgorithm hashing algorithm.
+         * @return the blob path for the path input. 
+ */ + public BlobPath path(PathInput pathInput, PathHashAlgorithm hashAlgorithm) { + DataCategory dataCategory = pathInput.dataCategory(); + DataType dataType = pathInput.dataType(); + assert dataCategory.isSupportedDataType(dataType) : "category:" + + dataCategory + + " type:" + + dataType + + " are not supported together"; + return generatePath(pathInput, hashAlgorithm); + } + + abstract BlobPath generatePath(PathInput pathInput, PathHashAlgorithm hashAlgorithm); + + abstract boolean requiresHashAlgorithm(); + + public static PathType parseString(String pathType) { + try { + return PathType.valueOf(pathType.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null + throw new IllegalArgumentException("Could not parse PathType for [" + pathType + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. + */ + public static final String NAME = "path_type"; + + } + + /** + * Type of hashes supported for path types that have hashing. + */ + @PublicApi(since = "2.14.0") + public enum PathHashAlgorithm { + + FNV_1A { + @Override + long hash(PathInput pathInput) { + String input = pathInput.indexUUID() + pathInput.shardId() + pathInput.dataCategory().getName() + pathInput.dataType() + .getName(); + return FNV1a.hash32(input); + } + }; + + abstract long hash(PathInput pathInput); + + public static PathHashAlgorithm parseString(String pathHashAlgorithm) { + try { + return PathHashAlgorithm.valueOf(pathHashAlgorithm.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException | NullPointerException e) { + // IllegalArgumentException is thrown when the input does not match any enum name + // NullPointerException is thrown when the input is null + throw new IllegalArgumentException("Could not parse PathHashAlgorithm for [" + pathHashAlgorithm + "]"); + } + } + + /** + * This string is used as key for storing information in the custom data in index settings. + */ + public static final String NAME = "path_hash_algorithm"; + } +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java new file mode 100644 index 0000000000000..ce5a6748fd9d4 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -0,0 +1,152 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import org.opensearch.common.Nullable; +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; + +import java.util.Objects; + +/** + * This class wraps internal details on the remote store path for an index. 
+ * + * @opensearch.internal + */ +@PublicApi(since = "2.14.0") +public class RemoteStorePathStrategy { + + private final PathType type; + + @Nullable + private final PathHashAlgorithm hashAlgorithm; + + public RemoteStorePathStrategy(PathType type) { + this(type, null); + } + + public RemoteStorePathStrategy(PathType type, PathHashAlgorithm hashAlgorithm) { + assert type.requiresHashAlgorithm() == false || Objects.nonNull(hashAlgorithm); + this.type = Objects.requireNonNull(type); + this.hashAlgorithm = hashAlgorithm; + } + + public PathType getType() { + return type; + } + + public PathHashAlgorithm getHashAlgorithm() { + return hashAlgorithm; + } + + @Override + public String toString() { + return "RemoteStorePathStrategy{" + "type=" + type + ", hashAlgorithm=" + hashAlgorithm + '}'; + } + + public BlobPath generatePath(PathInput pathInput) { + return type.generatePath(pathInput, hashAlgorithm); + } + + /** + * Wrapper class for the input required to generate path for remote store uploads. + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class PathInput { + private final BlobPath basePath; + private final String indexUUID; + private final String shardId; + private final DataCategory dataCategory; + private final DataType dataType; + + public PathInput(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { + this.basePath = Objects.requireNonNull(basePath); + this.indexUUID = Objects.requireNonNull(indexUUID); + this.shardId = Objects.requireNonNull(shardId); + this.dataCategory = Objects.requireNonNull(dataCategory); + this.dataType = Objects.requireNonNull(dataType); + } + + BlobPath basePath() { + return basePath; + } + + String indexUUID() { + return indexUUID; + } + + String shardId() { + return shardId; + } + + DataCategory dataCategory() { + return dataCategory; + } + + DataType dataType() { + return dataType; + } + + /** + * Returns a new builder for {@link PathInput}. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for {@link PathInput}. 
+ * + * @opensearch.internal + */ + @PublicApi(since = "2.14.0") + public static class Builder { + private BlobPath basePath; + private String indexUUID; + private String shardId; + private DataCategory dataCategory; + private DataType dataType; + + public Builder basePath(BlobPath basePath) { + this.basePath = basePath; + return this; + } + + public Builder indexUUID(String indexUUID) { + this.indexUUID = indexUUID; + return this; + } + + public Builder shardId(String shardId) { + this.shardId = shardId; + return this; + } + + public Builder dataCategory(DataCategory dataCategory) { + this.dataCategory = dataCategory; + return this; + } + + public Builder dataType(DataType dataType) { + this.dataType = dataType; + return this; + } + + public PathInput build() { + return new PathInput(basePath, indexUUID, shardId, dataCategory, dataType); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java similarity index 54% rename from server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java rename to server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java index 5d014c9862d45..20fc516132220 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathTypeResolver.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategyResolver.java @@ -9,27 +9,29 @@ package org.opensearch.index.remote; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.indices.IndicesService; /** - * Determines the {@link RemoteStorePathType} at the time of index metadata creation. + * Determines the {@link RemoteStorePathStrategy} at the time of index metadata creation. * * @opensearch.internal */ -public class RemoteStorePathTypeResolver { +public class RemoteStorePathStrategyResolver { - private volatile RemoteStorePathType type; + private volatile PathType type; - public RemoteStorePathTypeResolver(ClusterSettings clusterSettings) { + public RemoteStorePathStrategyResolver(ClusterSettings clusterSettings) { type = clusterSettings.get(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING); clusterSettings.addSettingsUpdateConsumer(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING, this::setType); } - public RemoteStorePathType getType() { - return type; + public RemoteStorePathStrategy get() { + return new RemoteStorePathStrategy(type, PathHashAlgorithm.FNV_1A); } - public void setType(RemoteStorePathType type) { + private void setType(PathType type) { this.type = type; } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java deleted file mode 100644 index 742d7b501f227..0000000000000 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathType.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.index.remote; - -import org.opensearch.common.annotation.PublicApi; -import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; - -import java.util.Locale; - -/** - * Enumerates the types of remote store paths resolution techniques supported by OpenSearch. - * For more information, see Github issue #12567. - * - * @opensearch.internal - */ -@PublicApi(since = "2.14.0") -public enum RemoteStorePathType { - - FIXED { - @Override - public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { - return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); - } - }, - HASHED_PREFIX { - @Override - public BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType) { - // TODO - We need to implement this, keeping the same path as Fixed for sake of multiple tests that can fail otherwise. - // throw new UnsupportedOperationException("Not implemented"); --> Not using this for unblocking couple of tests. - return basePath.add(indexUUID).add(shardId).add(dataCategory).add(dataType); - } - }; - - /** - * @param basePath base path of the underlying blob store repository - * @param indexUUID of the index - * @param shardId shard id - * @param dataCategory is either translog or segment - * @param dataType can be one of data, metadata or lock_files. - * @return the blob path for the underlying remote store path type. - */ - public BlobPath path(BlobPath basePath, String indexUUID, String shardId, DataCategory dataCategory, DataType dataType) { - assert dataCategory.isSupportedDataType(dataType) : "category:" - + dataCategory - + " type:" - + dataType - + " are not supported together"; - return generatePath(basePath, indexUUID, shardId, dataCategory.getName(), dataType.getName()); - } - - abstract BlobPath generatePath(BlobPath basePath, String indexUUID, String shardId, String dataCategory, String dataType); - - public static RemoteStorePathType parseString(String remoteStoreBlobPathType) { - try { - return RemoteStorePathType.valueOf(remoteStoreBlobPathType.toUpperCase(Locale.ROOT)); - } catch (IllegalArgumentException | NullPointerException e) { - // IllegalArgumentException is thrown when the input does not match any enum name - // NullPointerException is thrown when the input is null - throw new IllegalArgumentException("Could not parse RemoteStorePathType for [" + remoteStoreBlobPathType + "]"); - } - } - - /** - * This string is used as key for storing information in the custom data in index settings. 
- */ - public static final String NAME = "path_type"; -} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f3a62cdf1436f..d1ebbd168df52 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -4944,7 +4944,7 @@ public void deleteTranslogFilesFromRemoteTranslog() throws IOException { TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting); assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory; Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository(); - RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathType()); + RemoteFsTranslog.cleanup(repository, shardId, getThreadPool(), indexSettings.getRemoteStorePathStrategy()); } /* @@ -4966,7 +4966,7 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { shardId, getThreadPool(), shardPath().resolveTranslog(), - indexSettings.getRemoteStorePathType(), + indexSettings.getRemoteStorePathStrategy(), logger ); } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 9779a2320d79f..c74ab5e24a980 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -58,7 +58,8 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineException; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; @@ -411,7 +412,8 @@ void recoverFromSnapshotAndRemoteStore( remoteStoreRepository, indexUUID, shardId, - RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); sourceRemoteDirectory.initializeToSpecificCommit( primaryTerm, diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 999625e0e579f..ec1163fe91b6c 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -31,7 +31,7 @@ import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.core.action.ActionListener; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; @@ -899,14 +899,14 @@ public static void remoteDirectoryCleanup( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - 
RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( remoteStoreRepoForIndex, indexUUID, shardId, - pathType + pathStrategy ); remoteSegmentStoreDirectory.deleteStaleSegments(0); remoteSegmentStoreDirectory.deleteIfEmpty(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index f0ecd96bcf1f7..e462f6d4ac011 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -13,7 +13,7 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; @@ -28,9 +28,9 @@ import java.util.Objects; import java.util.function.Supplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; /** * Factory for a remote store directory @@ -52,12 +52,12 @@ public RemoteSegmentStoreDirectoryFactory(Supplier reposito public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throws IOException { String repositoryName = indexSettings.getRemoteStoreRepository(); String indexUUID = indexSettings.getIndex().getUUID(); - return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathType()); + return newDirectory(repositoryName, indexUUID, path.getShardId(), indexSettings.getRemoteStorePathStrategy()); } - public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathType pathType) + public Directory newDirectory(String repositoryName, String indexUUID, ShardId shardId, RemoteStorePathStrategy pathStrategy) throws IOException { - assert Objects.nonNull(pathType); + assert Objects.nonNull(pathStrategy); try (Repository repository = repositoriesService.get().repository(repositoryName)) { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; @@ -65,16 +65,30 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s BlobPath repositoryBasePath = blobStoreRepository.basePath(); String shardIdStr = String.valueOf(shardId.id()); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(DATA) + .build(); // Derive the path for data directory of SEGMENTS - BlobPath dataPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, DATA); + BlobPath dataPath = 
pathStrategy.generatePath(dataPathInput); RemoteDirectory dataDirectory = new RemoteDirectory( blobStoreRepository.blobStore().blobContainer(dataPath), blobStoreRepository::maybeRateLimitRemoteUploadTransfers, blobStoreRepository::maybeRateLimitRemoteDownloadTransfers ); + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(SEGMENTS) + .dataType(METADATA) + .build(); // Derive the path for metadata directory of SEGMENTS - BlobPath mdPath = pathType.path(repositoryBasePath, indexUUID, shardIdStr, SEGMENTS, METADATA); + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); RemoteDirectory metadataDirectory = new RemoteDirectory(blobStoreRepository.blobStore().blobContainer(mdPath)); // The path for lock is derived within the RemoteStoreLockManagerFactory @@ -83,7 +97,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, ShardId s repositoryName, indexUUID, shardIdStr, - pathType + pathStrategy ); return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool, shardId); diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index c033e4f7ad0aa..45d466d3a8ce8 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -11,7 +11,7 @@ import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -20,8 +20,8 @@ import java.util.function.Supplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; /** * Factory for remote store lock manager @@ -36,8 +36,13 @@ public RemoteStoreLockManagerFactory(Supplier repositoriesS this.repositoriesService = repositoriesService; } - public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId, RemoteStorePathType pathType) { - return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathType); + public RemoteStoreLockManager newLockManager( + String repositoryName, + String indexUUID, + String shardId, + RemoteStorePathStrategy pathStrategy + ) { + return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId, pathStrategy); } public static RemoteStoreMetadataLockManager newLockManager( @@ -45,12 +50,20 @@ public static RemoteStoreMetadataLockManager newLockManager( String repositoryName, String indexUUID, String shardId, - RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { try (Repository repository = repositoriesService.repository(repositoryName)) { assert repository instanceof BlobStoreRepository : 
"repository should be instance of BlobStoreRepository"; BlobPath repositoryBasePath = ((BlobStoreRepository) repository).basePath(); - BlobPath lockDirectoryPath = pathType.path(repositoryBasePath, indexUUID, shardId, SEGMENTS, LOCK_FILES); + + RemoteStorePathStrategy.PathInput lockFilesPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(repositoryBasePath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(SEGMENTS) + .dataType(LOCK_FILES) + .build(); + BlobPath lockDirectoryPath = pathStrategy.generatePath(lockFilesPathInput); BlobContainer lockDirectoryBlobContainer = ((BlobStoreRepository) repository).blobStore().blobContainer(lockDirectoryPath); return new RemoteStoreMetadataLockManager(new RemoteBufferedOutputDirectory(lockDirectoryBlobContainer)); } catch (RepositoryMissingException e) { diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index f0fb03cc905a4..bb7769eae1bf5 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -19,7 +19,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.util.FileSystemUtils; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteTranslogTransferTracker; import org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.index.translog.transfer.FileTransferTracker; @@ -50,9 +50,9 @@ import java.util.function.LongConsumer; import java.util.function.LongSupplier; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; /** * A Translog implementation which syncs local FS with a remote store @@ -113,7 +113,7 @@ public RemoteFsTranslog( shardId, fileTransferTracker, remoteTranslogTransferTracker, - indexSettings().getRemoteStorePathType() + indexSettings().getRemoteStorePathStrategy() ); try { download(translogTransferManager, location, logger); @@ -162,7 +162,7 @@ public static void download( ShardId shardId, ThreadPool threadPool, Path location, - RemoteStorePathType pathType, + RemoteStorePathStrategy pathStrategy, Logger logger ) throws IOException { assert repository instanceof BlobStoreRepository : String.format( @@ -181,7 +181,7 @@ public static void download( shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathType + pathStrategy ); RemoteFsTranslog.download(translogTransferManager, location, logger); logger.trace(remoteTranslogTransferTracker.toString()); @@ -259,13 +259,27 @@ public static TranslogTransferManager buildTranslogTransferManager( ShardId shardId, FileTransferTracker fileTransferTracker, RemoteTranslogTransferTracker tracker, - RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { - assert Objects.nonNull(pathType); + assert Objects.nonNull(pathStrategy); String indexUUID = shardId.getIndex().getUUID(); String shardIdStr = 
String.valueOf(shardId.id()); - BlobPath dataPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, DATA); - BlobPath mdPath = pathType.path(blobStoreRepository.basePath(), indexUUID, shardIdStr, TRANSLOG, METADATA); + RemoteStorePathStrategy.PathInput dataPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(DATA) + .build(); + BlobPath dataPath = pathStrategy.generatePath(dataPathInput); + RemoteStorePathStrategy.PathInput mdPathInput = RemoteStorePathStrategy.PathInput.builder() + .basePath(blobStoreRepository.basePath()) + .indexUUID(indexUUID) + .shardId(shardIdStr) + .dataCategory(TRANSLOG) + .dataType(METADATA) + .build(); + BlobPath mdPath = pathStrategy.generatePath(mdPathInput); BlobStoreTransferService transferService = new BlobStoreTransferService(blobStoreRepository.blobStore(), threadPool); return new TranslogTransferManager(shardId, transferService, dataPath, mdPath, fileTransferTracker, tracker); } @@ -539,7 +553,7 @@ private void deleteStaleRemotePrimaryTerms() { } } - public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathType pathType) + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool, RemoteStorePathStrategy pathStrategy) throws IOException { assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; @@ -553,7 +567,7 @@ public static void cleanup(Repository repository, ShardId shardId, ThreadPool th shardId, fileTransferTracker, remoteTranslogTransferTracker, - pathType + pathStrategy ); // clean up all remote translog files translogTransferManager.deleteTranslogFiles(); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 28ad4b23e319c..717f4dad4f073 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -123,7 +123,7 @@ import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.recovery.RecoveryStats; import org.opensearch.index.refresh.RefreshStats; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStoreStatsTrackerFactory; import org.opensearch.index.search.stats.SearchStats; import org.opensearch.index.seqno.RetentionLeaseStats; @@ -309,10 +309,10 @@ public class IndicesService extends AbstractLifecycleComponent * This setting is used to set the remote store blob store path prefix strategy. This setting is effective only for * remote store enabled cluster. 
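* As an illustration (consistent with the FNV1a and fixed-path tests later in this patch): FIXED resolves paths as basePath/indexUUID/shardId/dataCategory/dataType/, while HASHED_PREFIX is expected to prepend a prefix derived from the FNV-1a hash of indexUUID + shardId + dataCategory + dataType.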
*/ - public static final Setting<RemoteStorePathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( + public static final Setting<PathType> CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING = new Setting<>( "cluster.remote_store.index.path.prefix.type", - RemoteStorePathType.FIXED.toString(), - RemoteStorePathType::parseString, + PathType.FIXED.toString(), + PathType::parseString, Property.NodeScope, Property.Dynamic ); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index a7c2fb03285b0..5aab02993db34 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -108,7 +108,8 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -671,7 +672,8 @@ public void cloneRemoteStoreIndexShardSnapshot( remoteStoreRepository, indexUUID, String.valueOf(shardId.shardId()), - RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManger.cloneLock( FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), @@ -1110,7 +1112,7 @@ public static void remoteDirectoryCleanupAsync( String indexUUID, ShardId shardId, String threadPoolName, - RemoteStorePathType pathType + RemoteStorePathStrategy pathStrategy ) { threadpool.executor(threadPoolName) .execute( @@ -1120,7 +1122,7 @@ public static void remoteDirectoryCleanupAsync( remoteStoreRepoForIndex, indexUUID, shardId, - pathType + pathStrategy ), indexUUID, shardId @@ -1152,7 +1154,8 @@ protected void releaseRemoteStoreLockAndCleanup( remoteStoreRepoForIndex, indexUUID, shardId, - RemoteStorePathType.HASHED_PREFIX // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); remoteStoreMetadataLockManager.release(FileLockInfo.getLockInfoBuilder().withAcquirerId(shallowSnapshotUUID).build()); logger.debug("Successfully released lock for shard {} of index with uuid {}", shardId, indexUUID); @@ -1175,7 +1178,8 @@ protected void releaseRemoteStoreLockAndCleanup( indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), ThreadPool.Names.REMOTE_PURGE, - RemoteStorePathType.FIXED // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot + new RemoteStorePathStrategy(PathType.FIXED) + // TODO - The path type needs to be obtained from RemoteStoreShardShallowCopySnapshot ); } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index da2a36cbb0701..ff393ecf19a99 100644 ---
a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -451,7 +451,7 @@ public ClusterState execute(ClusterState currentState) { .put(snapshotIndexMetadata.getSettings()) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) ); - createIndexService.addRemoteStorePathTypeInCustomData(indexMdBuilder, false); + createIndexService.addRemoteStorePathStrategyInCustomData(indexMdBuilder, false); shardLimitValidator.validateShardLimit( renamedIndexName, snapshotIndexMetadata.getSettings(), diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index cf4de32890a2a..a2f19b8c694d0 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -71,7 +71,8 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.translog.Translog; import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; @@ -1587,32 +1588,38 @@ public void testBuildIndexMetadata() { */ public void testRemoteCustomData() { // Case 1 - Remote store is not enabled - IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(RemoteStorePathType.values())); + IndexMetadata indexMetadata = testRemoteCustomData(false, randomFrom(PathType.values())); assertNull(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY)); // Case 2 - cluster.remote_store.index.path.prefix.optimised=fixed (default value) - indexMetadata = testRemoteCustomData(true, RemoteStorePathType.FIXED); + indexMetadata = testRemoteCustomData(true, PathType.FIXED); + validateRemoteCustomData(indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), PathType.NAME, PathType.FIXED.name()); validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), - RemoteStorePathType.NAME, - RemoteStorePathType.FIXED.toString() + PathHashAlgorithm.NAME, + PathHashAlgorithm.FNV_1A.name() ); // Case 3 - cluster.remote_store.index.path.prefix.optimised=hashed_prefix - indexMetadata = testRemoteCustomData(true, RemoteStorePathType.HASHED_PREFIX); + indexMetadata = testRemoteCustomData(true, PathType.HASHED_PREFIX); validateRemoteCustomData( indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), - RemoteStorePathType.NAME, - RemoteStorePathType.HASHED_PREFIX.toString() + PathType.NAME, + PathType.HASHED_PREFIX.toString() + ); + validateRemoteCustomData( + indexMetadata.getCustomData(IndexMetadata.REMOTE_STORE_CUSTOM_KEY), + PathHashAlgorithm.NAME, + PathHashAlgorithm.FNV_1A.name() ); } - private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, RemoteStorePathType remoteStorePathType) { + private IndexMetadata testRemoteCustomData(boolean remoteStoreEnabled, PathType pathType) { Settings.Builder settingsBuilder = Settings.builder(); if (remoteStoreEnabled) { settingsBuilder.put(NODE_ATTRIBUTES.getKey() + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test"); } - 
settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), remoteStorePathType.toString()); + settingsBuilder.put(IndicesService.CLUSTER_REMOTE_STORE_PATH_PREFIX_TYPE_SETTING.getKey(), pathType.toString()); Settings settings = settingsBuilder.build(); ClusterService clusterService = mock(ClusterService.class); diff --git a/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java new file mode 100644 index 0000000000000..8d41211f10134 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/hashing/FNV1aTests.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.hashing; + +import org.opensearch.common.hash.FNV1a; +import org.opensearch.test.OpenSearchTestCase; + +public class FNV1aTests extends OpenSearchTestCase { + + public void testHash32WithKnownValues() { + assertEquals(-1114940029532279145L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-5313793557685118702L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4776941653547780179L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(7773876801598345624L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(3174284101845744576L, FNV1a.hash32("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(875447599647258598L, FNV1a.hash32("hell")); + assertEquals(1460560186469985451L, FNV1a.hash32("hello")); + assertEquals(-4959477702557352110L, FNV1a.hash32("hello w")); + assertEquals(-777130915070571257L, FNV1a.hash32("hello wo")); + assertEquals(-7887204531510399185L, FNV1a.hash32("hello wor")); + assertEquals(-782004333700192647L, FNV1a.hash32("hello worl")); + assertEquals(2168278929747165095L, FNV1a.hash32("hello world")); + assertEquals(2655121221658607504L, FNV1a.hash32("The quick brown fox jumps over the lazy dog")); + } + + public void testHash64WithKnownValues() { + assertEquals(-8975854101357662761L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsdata")); + assertEquals(-4380291990281602606L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentsmetadata")); + assertEquals(-4532418109365814419L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogdata")); + assertEquals(41331743556869080L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0translogmetadata")); + assertEquals(6170437157231275808L, FNV1a.hash64("1sH3kJO5TyeskNekv2YTbA0segmentslock_files")); + assertEquals(763638091547294502L, FNV1a.hash64("hell")); + assertEquals(-6615550055289275125L, FNV1a.hash64("hello")); + assertEquals(-8428874042178798254L, FNV1a.hash64("hello w")); + assertEquals(-6323438951910650201L, FNV1a.hash64("hello wo")); + assertEquals(7042426588567368687L, FNV1a.hash64("hello wor")); + assertEquals(7273314957493782425L, FNV1a.hash64("hello worl")); + assertEquals(8618312879776256743L, FNV1a.hash64("hello world")); + assertEquals(-866459186506731248L, FNV1a.hash64("The quick brown fox jumps over the lazy dog")); + } + +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java similarity index 51% rename from server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java rename to 
server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index 0f108d1b45f5a..33008bee1a392 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathTypeTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -9,44 +9,46 @@ package org.opensearch.index.remote; import org.opensearch.common.blobstore.BlobPath; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory; -import org.opensearch.index.remote.RemoteStoreDataEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.DataCategory; +import org.opensearch.index.remote.RemoteStoreEnums.DataType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; import java.util.List; import java.util.Locale; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.SEGMENTS; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.DATA; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.LOCK_FILES; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; -import static org.opensearch.index.remote.RemoteStorePathType.FIXED; -import static org.opensearch.index.remote.RemoteStorePathType.parseString; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.parseString; -public class RemoteStorePathTypeTests extends OpenSearchTestCase { +public class RemoteStoreEnumsTests extends OpenSearchTestCase { private static final String SEPARATOR = "/"; public void testParseString() { // Case 1 - Pass values from the enum. 
String typeString = FIXED.toString(); - RemoteStorePathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); + PathType type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); assertEquals(FIXED, type); - typeString = RemoteStorePathType.HASHED_PREFIX.toString(); + typeString = PathType.HASHED_PREFIX.toString(); type = parseString(randomFrom(typeString, typeString.toLowerCase(Locale.ROOT))); - assertEquals(RemoteStorePathType.HASHED_PREFIX, type); + assertEquals(PathType.HASHED_PREFIX, type); // Case 2 - Pass random string String randomTypeString = randomAlphaOfLength(2); IllegalArgumentException ex = assertThrows(IllegalArgumentException.class, () -> parseString(randomTypeString)); - assertEquals("Could not parse RemoteStorePathType for [" + randomTypeString + "]", ex.getMessage()); + assertEquals("Could not parse PathType for [" + randomTypeString + "]", ex.getMessage()); // Case 3 - Null string ex = assertThrows(IllegalArgumentException.class, () -> parseString(null)); - assertEquals("Could not parse RemoteStorePathType for [null]", ex.getMessage()); + assertEquals("Could not parse PathType for [null]", ex.getMessage()); } public void testGeneratePathForFixedType() { @@ -63,32 +65,74 @@ public void testGeneratePathForFixedType() { String basePath = getPath(pathList) + indexUUID + SEPARATOR + shardId + SEPARATOR; // Translog Data - BlobPath result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + PathInput pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + BlobPath result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Translog Metadata dataType = METADATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Translog Lock files - This is a negative case where the assertion will trip. 
- BlobPath finalBlobPath = blobPath; - assertThrows(AssertionError.class, () -> FIXED.path(finalBlobPath, indexUUID, shardId, TRANSLOG, LOCK_FILES)); + dataType = LOCK_FILES; + PathInput finalPathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + assertThrows(AssertionError.class, () -> FIXED.path(finalPathInput, null)); // Segment Data dataCategory = SEGMENTS; dataType = DATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Segment Metadata dataType = METADATA; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); // Segment Metadata dataType = LOCK_FILES; - result = FIXED.path(blobPath, indexUUID, shardId, dataCategory, dataType); + pathInput = PathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .dataCategory(dataCategory) + .dataType(dataType) + .build(); + result = FIXED.path(pathInput, null); assertEquals(basePath + dataCategory.getName() + SEPARATOR + dataType.getName() + SEPARATOR, result.buildAsString()); } @@ -108,4 +152,5 @@ private String getPath(List<String> pathList) { } return p + SEPARATOR; } + } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 59c8a9d92f07b..11b4eb078226f 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -37,7 +37,9 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.NRTReplicationEngineFactory; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; @@ -702,16 +704,19 @@ public void testCleanupAsync() throws Exception { String repositoryName = "test-repository"; String indexUUID = "test-idx-uuid"; ShardId shardId = new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt("0")); - RemoteStorePathType pathType = randomFrom(RemoteStorePathType.values()); + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy( + randomFrom(PathType.values()), + randomFrom(PathHashAlgorithm.values()) + ); RemoteSegmentStoreDirectory.remoteDirectoryCleanup( remoteSegmentStoreDirectoryFactory, repositoryName, indexUUID, shardId, - pathType + pathStrategy ); - verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName,
indexUUID, shardId, pathType); + verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathStrategy); verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); verify(remoteMetadataDirectory).delete(); verify(remoteDataDirectory).delete(); diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java index 0fe5557737447..de3dfbdaa4778 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactoryTests.java @@ -11,7 +11,8 @@ import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.index.remote.RemoteStorePathType; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; @@ -49,7 +50,7 @@ public void testNewLockManager() throws IOException { String testRepository = "testRepository"; String testIndexUUID = "testIndexUUID"; String testShardId = "testShardId"; - RemoteStorePathType pathType = RemoteStorePathType.FIXED; + RemoteStorePathStrategy pathStrategy = new RemoteStorePathStrategy(PathType.FIXED); BlobStoreRepository repository = mock(BlobStoreRepository.class); BlobStore blobStore = mock(BlobStore.class); @@ -65,7 +66,7 @@ public void testNewLockManager() throws IOException { testRepository, testIndexUUID, testShardId, - pathType + pathStrategy ); assertTrue(lockManager != null); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 9f72d3c7bd825..70800d4fd423a 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -99,7 +99,7 @@ import static org.opensearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder; import static org.opensearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.contains; diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index a9502dc051428..49719017ce736 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -48,8 +48,8 @@ import org.mockito.Mockito; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataCategory.TRANSLOG; -import static org.opensearch.index.remote.RemoteStoreDataEnums.DataType.METADATA; +import 
static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; import static org.opensearch.index.translog.transfer.TranslogTransferMetadata.METADATA_SEPARATOR; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; From eb66f62f2d6a4a4aea23fb3182991aaf9b46f560 Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:27:48 +0530 Subject: [PATCH 32/56] Memoize isOnRemoteNode in index settings and refactor as well (#12994) * Memoize isOnRemoteNode in index settings and rename it as well --------- Signed-off-by: Gaurav Bafna --- .../org/opensearch/index/IndexSettings.java | 8 +++--- .../index/engine/NRTReplicationEngine.java | 2 +- .../RemoteStoreStatsTrackerFactory.java | 2 +- .../opensearch/index/shard/IndexShard.java | 26 +++++++++---------- .../org/opensearch/index/store/Store.java | 2 +- .../opensearch/index/translog/Translog.java | 2 +- .../opensearch/indices/IndicesService.java | 2 +- .../recovery/PeerRecoveryTargetService.java | 2 +- .../opensearch/index/IndexSettingsTests.java | 11 ++++++++ .../index/shard/IndexShardTestCase.java | 4 +-- 10 files changed, 37 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index f17a254c4d3cd..f9062585c0093 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -763,6 +763,7 @@ public static IndexMergePolicy fromString(String text) { private volatile String defaultSearchPipeline; private final boolean widenIndexSortType; + private final boolean assignedOnRemoteNode; /** * The maximum age of a retention lease before it is considered expired. @@ -986,6 +987,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti * Now this sortField (IndexSort) is stored in SegmentInfo and we need to maintain backward compatibility for them. */ widenIndexSortType = IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).before(V_2_7_0); + assignedOnRemoteNode = RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); setEnableFuzzySetForDocId(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING)); setDocIdFuzzySetFalsePositiveProbability(scopedSettings.get(INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING)); @@ -1231,7 +1233,7 @@ public int getNumberOfReplicas() { * proper index setting during the migration. 
*/ public boolean isSegRepEnabledOrRemoteNode() { - return ReplicationType.SEGMENT.equals(replicationType) || isRemoteNode(); + return ReplicationType.SEGMENT.equals(replicationType) || isAssignedOnRemoteNode(); } public boolean isSegRepLocalEnabled() { @@ -1249,8 +1251,8 @@ public boolean isRemoteStoreEnabled() { return isRemoteStoreEnabled; } - public boolean isRemoteNode() { - return RemoteStoreNodeAttribute.isRemoteDataAttributePresent(this.getNodeSettings()); + public boolean isAssignedOnRemoteNode() { + return assignedOnRemoteNode; } /** diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 1e1825e1f8ace..d759423ce5a55 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -437,7 +437,7 @@ protected final void closeNoLock(String reason, CountDownLatch closedLatch) { during promotion. */ if (engineConfig.getIndexSettings().isRemoteStoreEnabled() == false - && engineConfig.getIndexSettings().isRemoteNode() == false) { + && engineConfig.getIndexSettings().isAssignedOnRemoteNode() == false) { latestSegmentInfos.counter = latestSegmentInfos.counter + SI_COUNTER_INCREMENT; latestSegmentInfos.changed(); } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java index e4c7eb56d02c6..21753e68db498 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreStatsTrackerFactory.java @@ -68,7 +68,7 @@ public RemoteStoreStatsTrackerFactory(ClusterService clusterService, Settings se @Override public void afterIndexShardCreated(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isRemoteNode() == false) { + if (indexShard.indexSettings().isRemoteStoreEnabled() == false && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { return; } ShardId shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d1ebbd168df52..2b935b743512a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -406,7 +406,7 @@ public IndexShard( logger, threadPool, this::getEngine, - indexSettings.isRemoteNode(), + indexSettings.isAssignedOnRemoteNode(), () -> getRemoteTranslogUploadBufferInterval(remoteStoreSettings::getClusterRemoteTranslogBufferInterval) ); this.mapperService = mapperService; @@ -1469,7 +1469,7 @@ public SegmentsStats segmentStats(boolean includeSegmentFileSizes, boolean inclu SegmentsStats segmentsStats = getEngine().segmentsStats(includeSegmentFileSizes, includeUnloadedSegments); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); // Populate remote_store stats only if the index is remote store backed - if (indexSettings().isRemoteNode()) { + if (indexSettings().isAssignedOnRemoteNode()) { segmentsStats.addRemoteSegmentStats( new RemoteSegmentStats(remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(shardId).stats()) ); @@ -1491,7 +1491,7 @@ public FieldDataStats fieldDataStats(String... 
fields) { public TranslogStats translogStats() { TranslogStats translogStats = getEngine().translogManager().getTranslogStats(); // Populate remote_store stats only if the index is remote store backed - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { translogStats.addRemoteTranslogStats( new RemoteTranslogStats(remoteStoreStatsTrackerFactory.getRemoteTranslogTransferTracker(shardId).stats()) ); @@ -1530,7 +1530,7 @@ public void flush(FlushRequest request) { * {@link org.opensearch.index.translog.TranslogDeletionPolicy} for details */ public void trimTranslog() { - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { return; } verifyNotClosed(); @@ -2050,7 +2050,7 @@ public void close(String reason, boolean flushEngine, boolean deleted) throws IO ToDo : Fix this https://github.com/opensearch-project/OpenSearch/issues/8003 */ public RemoteSegmentStoreDirectory getRemoteDirectory() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); assert remoteStore.directory() instanceof FilterDirectory : "Store.directory is not an instance of FilterDirectory"; FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); @@ -2063,7 +2063,7 @@ public RemoteSegmentStoreDirectory getRemoteDirectory() { * is in sync with local */ public boolean isRemoteSegmentStoreInSync() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); try { RemoteSegmentStoreDirectory directory = getRemoteDirectory(); if (directory.readLatestMetadataFile() != null) { @@ -2102,7 +2102,7 @@ public void waitForRemoteStoreSync() { Calls onProgress on seeing an increased file count on remote */ public void waitForRemoteStoreSync(Runnable onProgress) { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); RemoteSegmentStoreDirectory directory = getRemoteDirectory(); int segmentUploadeCount = 0; if (shardRouting.primary() == false) { @@ -2277,7 +2277,7 @@ public long recoverLocallyAndFetchStartSeqNo(boolean localTranslog) { * @return the starting sequence number from which the recovery should start. */ private long recoverLocallyUptoLastCommit() { - assert indexSettings.isRemoteNode() : "Remote translog store is not enabled"; + assert indexSettings.isAssignedOnRemoteNode() : "Remote translog store is not enabled"; long seqNo; validateLocalRecoveryState(); @@ -3540,7 +3540,7 @@ assert getLocalCheckpoint() == primaryContext.getCheckpointStates().get(allocati } private void postActivatePrimaryMode() { - if (indexSettings.isRemoteNode()) { + if (indexSettings.isAssignedOnRemoteNode()) { // We make sure to upload translog (even if it does not contain any operations) to remote translog. // This helps to get a consistent state in remote store where both remote segment store and remote // translog contains data. 
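// The isAssignedOnRemoteNode() checks above all read the flag that the IndexSettings
// constructor memoizes earlier in this patch, instead of re-evaluating node attributes
// on every call. A minimal, self-contained sketch of that pattern, with simplified names
// (the prefix-based attribute check is an illustrative assumption, not the exact
// RemoteStoreNodeAttribute.isRemoteDataAttributePresent logic):
final class MemoizedNodeFlag {
    private final boolean assignedOnRemoteNode; // computed once per instance

    MemoizedNodeFlag(java.util.Map<String, String> nodeAttributes) {
        // assumed stand-in for RemoteStoreNodeAttribute.isRemoteDataAttributePresent(nodeSettings)
        this.assignedOnRemoteNode = nodeAttributes.keySet().stream().anyMatch(key -> key.startsWith("remote_store"));
    }

    boolean isAssignedOnRemoteNode() {
        return assignedOnRemoteNode; // hot paths (stats, flush, recovery) now read a cached field
    }
}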
@@ -4010,7 +4010,7 @@ public boolean enableUploadToRemoteTranslog() { } private boolean hasOneRemoteSegmentSyncHappened() { - assert indexSettings.isRemoteNode(); + assert indexSettings.isAssignedOnRemoteNode(); // We upload remote translog only after one remote segment upload in case of migration RemoteSegmentStoreDirectory rd = getRemoteDirectory(); AtomicBoolean segment_n_uploaded = new AtomicBoolean(false); @@ -4624,7 +4624,7 @@ public final boolean isSearchIdle() { public final boolean isSearchIdleSupported() { // If the index is remote store backed, then search idle is not supported. This is to ensure that async refresh // task continues to upload to remote store periodically. - if (isRemoteTranslogEnabled() || indexSettings.isRemoteNode()) { + if (isRemoteTranslogEnabled() || indexSettings.isAssignedOnRemoteNode()) { return false; } return indexSettings.isSegRepEnabledOrRemoteNode() == false || indexSettings.getNumberOfReplicas() == 0; @@ -5263,9 +5263,9 @@ enum ShardMigrationState { } static ShardMigrationState getShardMigrationState(IndexSettings indexSettings, boolean shouldSeed) { - if (indexSettings.isRemoteNode() && indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isAssignedOnRemoteNode() && indexSettings.isRemoteStoreEnabled()) { return REMOTE_NON_MIGRATING; - } else if (indexSettings.isRemoteNode()) { + } else if (indexSettings.isAssignedOnRemoteNode()) { return shouldSeed ? REMOTE_MIGRATING_UNSEEDED : REMOTE_MIGRATING_SEEDED; } return ShardMigrationState.DOCREP_NON_MIGRATING; diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 0992d86d6f0aa..56fc5b1ffa90d 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -893,7 +893,7 @@ public void beforeClose() { * @throws IOException when there is an IO error committing. 
*/ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, long processedCheckpoint) throws IOException { - assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isRemoteNode(); + assert indexSettings.isSegRepEnabledOrRemoteNode() || indexSettings.isAssignedOnRemoteNode(); metadataLock.writeLock().lock(); try { final Map<String, String> userData = new HashMap<>(latestSegmentInfos.getUserData()); diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index e78300e368099..7c50ed6ecd58f 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -525,7 +525,7 @@ TranslogWriter createWriter( tragedy, persistedSequenceNumberConsumer, bigArrays, - indexSettings.isRemoteNode() + indexSettings.isAssignedOnRemoteNode() ); } catch (final IOException e) { throw new TranslogException(shardId, "failed to create new translog file", e); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 717f4dad4f073..ba78c28f3db88 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -933,7 +933,7 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isRemoteSnapshot()) { return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); } - if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isRemoteNode()) { + if (idxSettings.isSegRepEnabledOrRemoteNode() || idxSettings.isAssignedOnRemoteNode()) { return new NRTReplicationEngineFactory(); } return new InternalEngineFactory(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 227496f72f83d..c24840d0c1333 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -261,7 +261,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi } } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false - && indexShard.indexSettings().isRemoteNode(); + && indexShard.indexSettings().isAssignedOnRemoteNode(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); final boolean verifyTranslog = (hasRemoteTranslog || hasNoTranslog || hasRemoteSegmentStore) == false; final long startingSeqNo = indexShard.recoverLocallyAndFetchStartSeqNo(!hasRemoteTranslog); diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index e4ce879a5ec5e..474ec73d5fe61 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -1053,4 +1053,15 @@ public void testDefaultSearchPipeline() throws Exception { settings.updateIndexMetadata(metadata); assertEquals("foo", settings.getDefaultSearchPipeline()); } + + public void testIsOnRemoteNode() { + Version version = VersionUtils.getPreviousVersion(); + Settings theSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, version)
.put(IndexMetadata.SETTING_INDEX_UUID, "0xdeadbeef") + .build(); + Settings nodeSettings = Settings.builder().put("node.attr.remote_store.translog.repository", "my-repo-1").build(); + IndexSettings settings = newIndexSettings(newIndexMeta("index", theSettings), nodeSettings); + assertTrue("Index should be on remote node", settings.isAssignedOnRemoteNode()); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 9baa6110ab54e..80d16a8243634 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -654,7 +654,7 @@ protected IndexShard newShard( RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory = null; RepositoriesService mockRepoSvc = mock(RepositoriesService.class); - if (indexSettings.isRemoteStoreEnabled() || indexSettings.isRemoteNode()) { + if (indexSettings.isRemoteStoreEnabled() || indexSettings.isAssignedOnRemoteNode()) { String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); // remote path via setting a repository . This is a hack used for shards are created using reset . // since we can't get remote path from IndexShard directly, we are using repository to store it . @@ -1498,7 +1498,7 @@ private SegmentReplicationTargetService prepareForReplication( SegmentReplicationSourceFactory sourceFactory = null; SegmentReplicationTargetService targetService; - if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isRemoteNode()) { + if (primaryShard.indexSettings.isRemoteStoreEnabled() || primaryShard.indexSettings.isAssignedOnRemoteNode()) { RecoverySettings recoverySettings = new RecoverySettings( Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) From 8def8cb7cdc56fe7b6930377001513391f067422 Mon Sep 17 00:00:00 2001 From: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:56:04 +0530 Subject: [PATCH 33/56] [Remote Store] Add Primary/Replica side changes to support Dual Replication during Remote Store Migration (#12821) Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../MigrationBaseTestCase.java | 95 +++- .../RemoteDualReplicationIT.java | 530 ++++++++++++++++++ .../action/bulk/TransportShardBulkAction.java | 2 +- .../ReplicationModeAwareProxy.java | 28 +- .../TransportReplicationAction.java | 12 +- .../org/opensearch/index/IndexService.java | 7 +- .../org/opensearch/index/IndexSettings.java | 6 +- .../seqno/GlobalCheckpointSyncAction.java | 2 +- .../index/seqno/ReplicationTracker.java | 69 ++- .../shard/CheckpointRefreshListener.java | 2 +- .../opensearch/index/shard/IndexShard.java | 37 +- .../opensearch/indices/IndicesService.java | 7 +- .../cluster/IndicesClusterStateService.java | 12 +- .../RecoverySourceHandlerFactory.java | 3 +- .../indices/recovery/RecoveryTarget.java | 11 +- .../SegmentReplicationSourceFactory.java | 2 +- .../checkpoint/PublishCheckpointAction.java | 8 +- ...portVerifyShardBeforeCloseActionTests.java | 7 +- .../flush/TransportShardFlushActionTests.java | 5 +- ...sportVerifyShardIndexBlockActionTests.java | 5 +- .../TransportShardRefreshActionTests.java | 5 +- .../bulk/TransportShardBulkActionTests.java | 5 +- ...TransportResyncReplicationActionTests.java | 5 +- 
.../ReplicationModeAwareProxyTests.java | 216 +++++++ .../ReplicationOperationTests.java | 196 ++++++- .../TransportReplicationActionTests.java | 7 + .../index/remote/RemoteStoreTestsHelper.java | 19 + .../RetentionLeasesReplicationTests.java | 4 +- .../GlobalCheckpointSyncActionTests.java | 8 +- ...PeerRecoveryRetentionLeaseExpiryTests.java | 3 +- ...ReplicationTrackerRetentionLeaseTests.java | 48 +- .../seqno/ReplicationTrackerTestCase.java | 23 +- .../index/seqno/ReplicationTrackerTests.java | 32 +- ...tentionLeaseBackgroundSyncActionTests.java | 5 +- .../seqno/RetentionLeaseSyncActionTests.java | 7 +- .../index/shard/IndexShardTests.java | 21 +- .../shard/PrimaryReplicaSyncerTests.java | 6 +- ...dicesLifecycleListenerSingleNodeTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 7 +- .../PeerRecoverySourceServiceTests.java | 5 +- .../PeerRecoveryTargetServiceTests.java | 5 +- .../PublishCheckpointActionTests.java | 41 +- .../index/engine/EngineTestCase.java | 3 +- ...enSearchIndexLevelReplicationTestCase.java | 31 +- .../index/shard/IndexShardTestCase.java | 69 ++- .../index/shard/IndexShardTestUtils.java | 67 +++ 47 files changed, 1540 insertions(+), 155 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java create mode 100644 server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java create mode 100644 test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index d218f0a985cf3..f97950f2652a3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -715,7 +715,8 @@ public static final IndexShard newIndexShard( nodeId, null, DefaultRemoteStoreSettings.INSTANCE, - false + false, + IndexShardTestUtils.getFakeDiscoveryNodes(initializingShardRouting) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 19da668c432cf..0c35f91121059 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -8,17 +8,29 @@ package org.opensearch.remotemigration; +import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.bulk.BulkRequest; +import org.opensearch.action.bulk.BulkResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.repositories.fs.ReloadableFsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import java.util.List; import java.util.concurrent.ExecutionException; +import 
java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -28,8 +40,16 @@ public class MigrationBaseTestCase extends OpenSearchIntegTestCase { protected Path segmentRepoPath; protected Path translogRepoPath; - boolean addRemote = false; + Settings extraSettings = Settings.EMPTY; + + private final List<String> documentKeys = List.of( + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5) + ); protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { @@ -40,6 +60,7 @@ protected Settings nodeSettings(int nodeOrdinal) { logger.info("Adding remote store node"); return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) + .put(extraSettings) .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) .build(); } else { @@ -64,4 +85,76 @@ protected void setFailRate(String repoName, int value) throws ExecutionException client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() ); } + + public void initDocRepToRemoteMigration() { + assertTrue( + internalCluster().client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings( + Settings.builder() + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + ) + .get() + .isAcknowledged() + ); + } + + public BulkResponse indexBulk(String indexName, int numDocs) { + BulkRequest bulkRequest = new BulkRequest(); + for (int i = 0; i < numDocs; i++) { + final IndexRequest request = client().prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) + .request(); + bulkRequest.add(request); + } + return client().bulk(bulkRequest).actionGet(); + } + + private void indexSingleDoc(String indexName) { + IndexResponse indexResponse = client().prepareIndex(indexName).setId("id").setSource("field", "value").get(); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + DeleteResponse deleteResponse = client().prepareDelete(indexName, "id").get(); + assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); + client().prepareIndex(indexName).setSource("auto", true).get(); + } + + public class AsyncIndexingService { + private String indexName; + private AtomicLong indexedDocs = new AtomicLong(0); + private AtomicBoolean finished = new AtomicBoolean(); + private Thread indexingThread; + + AsyncIndexingService(String indexName) { + this.indexName = indexName; + } + + public void startIndexing() { + indexingThread = getIndexingThread(); + indexingThread.start(); + } + + public void stopIndexing() throws InterruptedException { + finished.set(true); + indexingThread.join(); + } + + public long getIndexedDocs() { + return indexedDocs.get(); + } + + private Thread getIndexingThread() { + return new Thread(() -> { + while (finished.get() == false) { + indexSingleDoc(indexName); + long currentDocCount =
indexedDocs.incrementAndGet(); + logger.info("Completed ingestion of {} docs", currentDocCount); + + } + }); + } + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java new file mode 100644 index 0000000000000..34b60d5f3e9b3 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java @@ -0,0 +1,530 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotemigration; + +import org.opensearch.action.admin.indices.stats.CommonStats; +import org.opensearch.action.admin.indices.stats.ShardStats; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexService; +import org.opensearch.index.remote.RemoteSegmentStats; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.indices.IndexingMemoryController; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteDualReplicationIT extends MigrationBaseTestCase { + private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; + private final String REMOTE_PRI_DOCREP_REMOTE_REP = "remote-primary-docrep-remote-replica"; + private final String FAILOVER_REMOTE_TO_DOCREP = "failover-remote-to-docrep"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + /* Adding the following mock plugins: + - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync + - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated + */ + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class) + ).collect(Collectors.toList()); + } + + /* + Scenario: + - Starts 2 docrep backed node + - Creates index with 1 replica + - Index some docs + - Start 1 remote backed node + - Move primary copy from docrep to remote through _cluster/reroute + - Index some more docs + - Assert primary-replica consistency + */ + public void testRemotePrimaryDocRepReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 2 docrep data nodes"); + internalCluster().startDataOnlyNodes(2); +
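// Aside: the AsyncIndexingService helper added to MigrationBaseTestCase above follows a
// classic stop-flag pattern for background load generation. A standalone sketch of the
// pattern under simplified names (doOneUnitOfWork() is a hypothetical stand-in for
// indexSingleDoc(indexName)):
final class BackgroundWorker {
    private final java.util.concurrent.atomic.AtomicLong completed = new java.util.concurrent.atomic.AtomicLong();
    private final java.util.concurrent.atomic.AtomicBoolean finished = new java.util.concurrent.atomic.AtomicBoolean();
    private Thread worker;

    void start() {
        worker = new Thread(() -> {
            while (finished.get() == false) { // volatile read observes stop() promptly
                doOneUnitOfWork();
                completed.incrementAndGet();
            }
        });
        worker.start();
    }

    void stop() throws InterruptedException {
        finished.set(true); // request shutdown
        worker.join();      // wait for the in-flight iteration to finish
    }

    long completedUnits() {
        return completed.get(); // safe to read from the coordinating thread after stop()
    }

    private void doOneUnitOfWork() {
        // hypothetical unit of work; in the test this is an index/delete/index cycle
    }
}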
internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 1 replica"); + Settings oneReplica = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REP, oneReplica); + ensureGreen(REMOTE_PRI_DOCREP_REP); + + int initialBatch = randomIntBetween(1, 1000); + logger.info("---> Indexing {} docs", initialBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, initialBatch); + + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REP, initialBatch, secondBatch); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Index some docs + - Move primary copy from docrep to remote through _cluster/reroute + - Starts another remote backed data node + - Expands index to 2 replicas. 
One replica copy lies in remote backed node and other in docrep backed node + - Index some more docs + - Assert primary-replica consistency + */ + public void testRemotePrimaryDocRepAndRemoteReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data nodes"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings zeroReplicas = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .build(); + createIndex(REMOTE_PRI_DOCREP_REMOTE_REP, zeroReplicas); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + initDocRepToRemoteMigration(); + + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + int firstBatch = randomIntBetween(1, 100); + logger.info("---> Indexing {} docs", firstBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch); + + String primaryShardHostingNode = primaryNodeName(REMOTE_PRI_DOCREP_REMOTE_REP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(REMOTE_PRI_DOCREP_REMOTE_REP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + ClusterState clusterState = internalCluster().client().admin().cluster().prepareState().get().getState(); + String primaryShardHostingNodeId = clusterState.getRoutingTable() + .index(REMOTE_PRI_DOCREP_REMOTE_REP) + .shard(0) + .primaryShard() + .currentNodeId(); + assertTrue(clusterState.getNodes().get(primaryShardHostingNodeId).isRemoteStoreNode()); + + logger.info("---> Starting another remote enabled node"); + internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + + logger.info("---> Expanding index to 2 replica copies"); + Settings twoReplicas = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(REMOTE_PRI_DOCREP_REMOTE_REP) + .setSettings(twoReplicas) + .get() + ); + ensureGreen(REMOTE_PRI_DOCREP_REMOTE_REP); + + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing another {} docs", secondBatch); + indexBulk(REMOTE_PRI_DOCREP_REMOTE_REP, secondBatch); + // Defensive check to ensure that doc count in replica shard catches up to the primary copy + refreshAndWaitForReplication(REMOTE_PRI_DOCREP_REMOTE_REP); + assertReplicaAndPrimaryConsistency(REMOTE_PRI_DOCREP_REMOTE_REP, firstBatch, secondBatch); + } + + /* + Checks if retention leases are published on primary shard and it's docrep copies, but not on remote copies + */ + public void testRetentionLeasePresentOnDocrepReplicaButNotRemote() throws Exception { + /* Reducing indices.memory.shard_inactive_time 
to force a flush and trigger translog sync, + instead of relying on Global CKP Sync action which doesn't run on remote enabled copies + + Under steady state, RetentionLeases would be on (GlobalCkp + 1) on a + docrep enabled shard copy and (GlobalCkp) for a remote enabled shard copy. + This is because we block translog sync on remote enabled shard copies during the GlobalCkpSync background task. + + RLs on remote enabled copies are brought up to (GlobalCkp + 1) upon a flush request issued by IndexingMemoryController + when the shard becomes inactive after SHARD_INACTIVE_TIME_SETTING interval. + + Flush triggers a force sync of translog which bumps the RetentionLease sequence number along with it + */ + extraSettings = Settings.builder().put(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.getKey(), "3s").build(); + testRemotePrimaryDocRepAndRemoteReplica(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertBusy(() -> { + for (ShardStats shardStats : internalCluster().client() + .admin() + .indices() + .prepareStats(REMOTE_PRI_DOCREP_REMOTE_REP) + .get() + .getShards()) { + ShardRouting shardRouting = shardStats.getShardRouting(); + DiscoveryNode discoveryNode = nodes.get(shardRouting.currentNodeId()); + RetentionLeases retentionLeases = shardStats.getRetentionLeaseStats().retentionLeases(); + if (shardRouting.primary()) { + // Primary copy should be on remote node and should have retention leases + assertTrue(discoveryNode.isRemoteStoreNode()); + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } else { + // Checkpoints and Retention Leases are not synced to remote replicas + if (discoveryNode.isRemoteStoreNode()) { + assertTrue(shardStats.getRetentionLeaseStats().retentionLeases().leases().isEmpty()); + } else { + // Replica copy on docrep node should have retention leases + assertCheckpointsConsistency(shardStats); + assertRetentionLeaseConsistency(shardStats, retentionLeases); + } + } + } + }); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Move primary copy from docrep to remote through _cluster/reroute + - Expands index to 1 replica + - Stops remote enabled node + - Ensure doc count is same after failover + - Index some more docs to ensure working of failed-over primary + */ + public void testFailoverRemotePrimaryToDocrepReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + + logger.info("---> Starting 1 docrep data node"); + String docrepNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals(internalCluster().client().admin().cluster().prepareGetRepositories().get().repositories().size(), 0); + + logger.info("---> Creating index with 0 replica"); + Settings excludeRemoteNode = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build(); + createIndex(FAILOVER_REMOTE_TO_DOCREP, excludeRemoteNode); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + initDocRepToRemoteMigration(); + logger.info("---> Starting 1 remote enabled data node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + logger.info("---> Starting doc 
ingestion in parallel thread"); + AsyncIndexingService asyncIndexingService = new AsyncIndexingService(FAILOVER_REMOTE_TO_DOCREP); + asyncIndexingService.startIndexing(); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Expanding index to 1 replica copy"); + Settings oneReplica = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings() + .setIndices(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(oneReplica) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Stopping indexing thread"); + asyncIndexingService.stopIndexing(); + + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long initialPrimaryDocCount = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + initialPrimaryDocCount = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + int firstBatch = (int) asyncIndexingService.getIndexedDocs(); + assertReplicaAndPrimaryConsistency(FAILOVER_REMOTE_TO_DOCREP, firstBatch, 0); + + logger.info("---> Stop remote store enabled node"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(remoteNodeName)); + ensureStableCluster(2); + ensureYellow(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + long primaryDocCountAfterFailover = 0; + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + if (shardRouting.primary()) { + assertFalse(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + primaryDocCountAfterFailover = shardStatsMap.get(shardRouting).getStats().getDocs().getCount(); + } + } + assertEquals(initialPrimaryDocCount, primaryDocCountAfterFailover); + + logger.info("---> Index some more docs to ensure that the failed over primary is ingesting new docs"); + int secondBatch = randomIntBetween(1, 10); + logger.info("---> Indexing {} more docs", secondBatch); + indexBulk(FAILOVER_REMOTE_TO_DOCREP, secondBatch); + refreshAndWaitForReplication(FAILOVER_REMOTE_TO_DOCREP); + + shardStatsMap = internalCluster().client().admin().indices().prepareStats(FAILOVER_REMOTE_TO_DOCREP).setDocs(true).get().asMap(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach( + (shardRouting, shardStats) -> { assertEquals(firstBatch + secondBatch, shardStats.getStats().getDocs().getCount()); } + ); + } + + /* + Scenario: + - Starts 1 docrep backed data node + - Creates an index with 0 replica + - Starts 1 remote backed data node + - Move primary copy from docrep to remote through _cluster/reroute + - Expands 
index to 1 replica + - Stops remote enabled node + - Ensure doc count is same after failover + - Index some more docs to ensure working of failed-over primary + - Starts another remote node + - Move primary copy from docrep to remote through _cluster/reroute + - Ensure that remote store is seeded in the new remote node by asserting remote uploads from that node > 0 + */ + public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() throws Exception { + testFailoverRemotePrimaryToDocrepReplica(); + + logger.info("---> Removing replica copy"); + assertAcked( + internalCluster().client() + .admin() + .indices() + .prepareUpdateSettings(FAILOVER_REMOTE_TO_DOCREP) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + logger.info("---> Starting a new remote enabled node"); + addRemote = true; + String remoteNodeName = internalCluster().startDataOnlyNode(); + internalCluster().validateClusterFormed(); + assertEquals( + internalCluster().client() + .admin() + .cluster() + .prepareGetRepositories(REPOSITORY_NAME, REPOSITORY_2_NAME) + .get() + .repositories() + .size(), + 2 + ); + + String primaryShardHostingNode = primaryNodeName(FAILOVER_REMOTE_TO_DOCREP); + logger.info("---> Moving primary copy from {} to remote enabled node {}", primaryShardHostingNode, remoteNodeName); + assertAcked( + internalCluster().client() + .admin() + .cluster() + .prepareReroute() + .add(new MoveAllocationCommand(FAILOVER_REMOTE_TO_DOCREP, 0, primaryShardHostingNode, remoteNodeName)) + .get() + ); + ensureGreen(FAILOVER_REMOTE_TO_DOCREP); + + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(FAILOVER_REMOTE_TO_DOCREP) + .get() + .asMap(); + DiscoveryNodes discoveryNodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + assertEquals(1, shardStatsMap.size()); + shardStatsMap.forEach((shardRouting, shardStats) -> { + if (discoveryNodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()) { + RemoteSegmentStats remoteSegmentStats = shardStats.getStats().getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + } + }); + } + + private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception { + assertBusy(() -> { + Map shardStatsMap = internalCluster().client() + .admin() + .indices() + .prepareStats(indexName) + .setDocs(true) + .get() + .asMap(); + DiscoveryNodes nodes = internalCluster().client().admin().cluster().prepareState().get().getState().getNodes(); + for (ShardRouting shardRouting : shardStatsMap.keySet()) { + CommonStats shardStats = shardStatsMap.get(shardRouting).getStats(); + if (shardRouting.primary()) { + assertEquals(firstBatch + secondBatch, shardStats.getDocs().getCount()); + assertTrue(nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode()); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + assertTrue(remoteSegmentStats.getUploadBytesSucceeded() > 0); + assertTrue(remoteSegmentStats.getTotalUploadTime() > 0); + } else { + boolean remoteNode = nodes.get(shardRouting.currentNodeId()).isRemoteStoreNode(); + assertEquals( + "Mismatched doc count. Is this on remote node ? 
" + remoteNode, + firstBatch + secondBatch, + shardStats.getDocs().getCount() + ); + RemoteSegmentStats remoteSegmentStats = shardStats.getSegments().getRemoteSegmentStats(); + if (remoteNode) { + assertTrue(remoteSegmentStats.getDownloadBytesStarted() > 0); + assertTrue(remoteSegmentStats.getTotalDownloadTime() > 0); + } else { + assertEquals(0, remoteSegmentStats.getUploadBytesSucceeded()); + assertEquals(0, remoteSegmentStats.getTotalUploadTime()); + } + } + } + }); + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that the stored Retention Leases equals to 1 + maxSeqNo ingested on the node + * + * @param shardStats ShardStats object from NodesStats API + * @param retentionLeases RetentionLeases from NodesStats API + */ + private static void assertRetentionLeaseConsistency(ShardStats shardStats, RetentionLeases retentionLeases) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + for (RetentionLease rl : retentionLeases.leases()) { + assertEquals(maxSeqNo + 1, rl.retainingSequenceNumber()); + } + } + + /** + * For a docrep enabled shard copy or a primary shard copy, + * asserts that local and global checkpoints are up-to-date with maxSeqNo of doc operations + * + * @param shardStats ShardStats object from NodesStats API + */ + private static void assertCheckpointsConsistency(ShardStats shardStats) { + long maxSeqNo = shardStats.getSeqNoStats().getMaxSeqNo(); + long localCkp = shardStats.getSeqNoStats().getLocalCheckpoint(); + long globalCkp = shardStats.getSeqNoStats().getGlobalCheckpoint(); + + assertEquals(maxSeqNo, localCkp); + assertEquals(maxSeqNo, globalCkp); + } +} diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index a7a13afd2597c..c1d13128c18b1 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -442,7 +442,7 @@ protected long primaryOperationSize(BulkShardRequest request) { @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } return super.getReplicationMode(indexShard); diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java index 189bc82348a0c..9f5e31a9c6926 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -9,6 +9,8 @@ package org.opensearch.action.support.replication; import org.opensearch.action.support.replication.ReplicationOperation.ReplicaResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.core.action.ActionListener; @@ -31,14 +33,22 @@ public class ReplicationModeAwareProxy primaryTermValidationProxy; + private final DiscoveryNodes discoveryNodes; + + private final boolean isRemoteEnabled; + public ReplicationModeAwareProxy( ReplicationMode replicationModeOverride, + DiscoveryNodes discoveryNodes, ReplicationOperation.Replicas replicasProxy, - ReplicationOperation.Replicas 
primaryTermValidationProxy + ReplicationOperation.Replicas primaryTermValidationProxy, + boolean remoteIndexSettingsEnabled ) { super(replicasProxy); this.replicationModeOverride = Objects.requireNonNull(replicationModeOverride); this.primaryTermValidationProxy = Objects.requireNonNull(primaryTermValidationProxy); + this.discoveryNodes = discoveryNodes; + this.isRemoteEnabled = remoteIndexSettingsEnabled; } @Override @@ -60,16 +70,26 @@ protected void performOnReplicaProxy( @Override ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { - // If the current routing is the primary, then it does not need to be replicated if (shardRouting.isSameAllocation(primaryRouting)) { return ReplicationMode.NO_REPLICATION; } - + // Perform full replication during primary relocation if (primaryRouting.relocating() && shardRouting.isSameAllocation(primaryRouting.getTargetRelocatingShard())) { return ReplicationMode.FULL_REPLICATION; } - + /* + Only applicable during remote store migration. + During the migration process, remote based index settings will not be enabled, + thus we will rely on node attributes to figure out the replication mode + */ + if (isRemoteEnabled == false) { + DiscoveryNode targetNode = discoveryNodes.get(shardRouting.currentNodeId()); + if (targetNode != null && targetNode.isRemoteStoreNode() == false) { + // Perform full replication if replica is hosted on a non-remote node. + return ReplicationMode.FULL_REPLICATION; + } + } return replicationModeOverride; } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 95f998e2d89c2..8d86128e36441 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -356,7 +356,7 @@ public void performOn( * @return the overridden replication mode. */ public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.NO_REPLICATION; } return ReplicationMode.FULL_REPLICATION; @@ -642,8 +642,14 @@ public void handleException(TransportException exp) { primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, retryTimeout, - indexShard.isRemoteTranslogEnabled() - ? new ReplicationModeAwareProxy<>(getReplicationMode(indexShard), replicasProxy, termValidationProxy) + indexShard.indexSettings().isRemoteNode() + ? 
new ReplicationModeAwareProxy<>( + getReplicationMode(indexShard), + clusterState.getNodes(), + replicasProxy, + termValidationProxy, + indexShard.isRemoteTranslogEnabled() + ) : new FanoutReplicationProxy<>(replicasProxy) ).execute(); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 9ded1b174d4c6..a7b29314210df 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; @@ -462,7 +463,8 @@ public synchronized IndexShard createShard( final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, final RepositoriesService repositoriesService, final DiscoveryNode targetNode, - @Nullable DiscoveryNode sourceNode + @Nullable DiscoveryNode sourceNode, + DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -553,7 +555,8 @@ public synchronized IndexShard createShard( nodeEnv.nodeId(), recoverySettings, remoteStoreSettings, - seedRemote + seedRemote, + discoveryNodes ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index f9062585c0093..82875564c1c07 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -1237,11 +1237,7 @@ public boolean isSegRepEnabledOrRemoteNode() { } public boolean isSegRepLocalEnabled() { - return isSegRepEnabledOrRemoteNode() && !isRemoteStoreEnabled(); - } - - public boolean isSegRepWithRemoteEnabled() { - return isSegRepEnabledOrRemoteNode() && isRemoteStoreEnabled(); + return ReplicationType.SEGMENT.equals(replicationType) && !isRemoteStoreEnabled(); } /** diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index ca1dfe2d5ad01..0c167d6d80b5c 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -135,7 +135,7 @@ protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint() - && indexShard.isRemoteTranslogEnabled() == false) { + && indexShard.indexSettings().isRemoteNode() == false) { indexShard.sync(); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 0e625e9f30320..b2eb2f03486ac 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -253,6 +253,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private volatile ReplicationCheckpoint latestReplicationCheckpoint; + private final Function isShardOnRemoteEnabledNode; + /** * Get all retention leases tracked on this shard. * @@ -999,7 +1001,8 @@ public ReplicationTracker( final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, - final Supplier safeCommitInfoSupplier + final Supplier safeCommitInfoSupplier, + final Function isShardOnRemoteEnabledNode ) { this( shardId, @@ -1011,7 +1014,8 @@ public ReplicationTracker( currentTimeMillisSupplier, onSyncRetentionLeases, safeCommitInfoSupplier, - x -> {} + x -> {}, + isShardOnRemoteEnabledNode ); } @@ -1037,7 +1041,8 @@ public ReplicationTracker( final LongSupplier currentTimeMillisSupplier, final BiConsumer> onSyncRetentionLeases, final Supplier safeCommitInfoSupplier, - final Consumer onReplicationGroupUpdated + final Consumer onReplicationGroupUpdated, + final Function isShardOnRemoteEnabledNode ) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; @@ -1060,6 +1065,7 @@ public ReplicationTracker( this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; this.latestReplicationCheckpoint = indexSettings.isSegRepEnabledOrRemoteNode() ? ReplicationCheckpoint.empty(shardId) : null; + this.isShardOnRemoteEnabledNode = isShardOnRemoteEnabledNode; assert Version.V_EMPTY.equals(indexSettings.getIndexVersionCreated()) == false; assert invariant(); } @@ -1088,8 +1094,12 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } - - assert indexSettings().isRemoteTranslogStoreEnabled() + assert indexSettings.isRemoteTranslogStoreEnabled() + // Handle migration cases. 
Ignore assertion if any of the shard copies in the replication group is assigned to a remote node + || (replicationGroup != null + && replicationGroup.getReplicationTargets() + .stream() + .anyMatch(shardRouting -> isShardOnRemoteEnabledNode.apply(shardRouting.currentNodeId()))) || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; @@ -1248,7 +1258,9 @@ private void createReplicationLagTimers() { if (cps.inSync && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false && isPrimaryRelocation(allocationId) == false - && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) { + && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint) + && (indexSettings.isSegRepLocalEnabled() == true + || isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId()))) { cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> new SegmentReplicationLagTimer()); logger.trace( () -> new ParameterizedMessage( @@ -1366,8 +1378,7 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) - || indexSettings().isRemoteTranslogStoreEnabled()) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) || indexSettings.isRemoteNode()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; @@ -1453,7 +1464,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, inSync, inSync, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1472,7 +1488,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, false, false, - isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + initializingId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, initializingId) + ) ) ); } @@ -1486,7 +1507,12 @@ public synchronized void updateFromClusterManager( globalCheckpoint, true, true, - isReplicated(inSyncId, primaryAllocationId, primaryTargetAllocationId) + isReplicated( + inSyncId, + primaryAllocationId, + primaryTargetAllocationId, + assignedToRemoteStoreNode(routingTable, inSyncId) + ) ) ); } @@ -1503,6 +1529,12 @@ public synchronized void updateFromClusterManager( assert invariant(); } + private boolean assignedToRemoteStoreNode(IndexShardRoutingTable routingTable, String allocationId) { + return indexSettings().isRemoteStoreEnabled() + || (routingTable.getByAllocationId(allocationId) != null + && isShardOnRemoteEnabledNode.apply(routingTable.getByAllocationId(allocationId).currentNodeId())); + } + /** * Returns whether the requests are replicated considering the remote translog existence, current/primary/primary target allocation ids. 
* @@ -1511,13 +1543,16 @@ public synchronized void updateFromClusterManager( * @param primaryTargetAllocationId primary target allocation id * @return the replication mode. */ - private boolean isReplicated(String allocationId, String primaryAllocationId, String primaryTargetAllocationId) { - // If remote translog is enabled, then returns replication mode checking current allocation id against the + private boolean isReplicated( + String allocationId, + String primaryAllocationId, + String primaryTargetAllocationId, + boolean assignedToRemoteStoreNode + ) { + // If assigned to a remote node, returns true if given allocation id matches the primary or its relocation target allocation // primary and primary target allocation id. - // If remote translog is enabled, then returns true if given allocation id matches the primary or it's relocation target allocation - // id. - if (indexSettings().isRemoteTranslogStoreEnabled()) { - return (allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId)); + if (assignedToRemoteStoreNode == true) { + return allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId); } // For other case which is local translog, return true as the requests are replicated to all shards in the replication group. return true; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index 675d60ec2b63d..b47025d75282c 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -43,7 +43,7 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() - && !shard.indexSettings.isSegRepWithRemoteEnabled()) { + && shard.indexSettings.isRemoteNode() == false) { publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } return true; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 2b935b743512a..484083a5b1260 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -67,6 +67,8 @@ import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource; @@ -357,6 +359,7 @@ Runnable getGlobalCheckpointSyncer() { On source remote node , it will be REMOTE_MIGRATING_UNSEEDED when relocating from docrep node */ private final ShardMigrationState shardMigrationState; + private DiscoveryNodes discoveryNodes; public IndexShard( final ShardRouting shardRouting, @@ -386,7 +389,8 @@ public IndexShard( final String nodeId, final RecoverySettings recoverySettings, final RemoteStoreSettings remoteStoreSettings, - boolean seedRemote + boolean seedRemote, + final DiscoveryNodes discoveryNodes ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -448,7 +452,8 @@ 
public IndexShard( threadPool::absoluteTimeInMillis, (retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener), this::getSafeCommitInfo, - pendingReplicationActions + pendingReplicationActions, + isShardOnRemoteEnabledNode ); // the query cache is a node-level thing, however we want the most popular filters @@ -486,6 +491,7 @@ public boolean shouldCache(Query query) { this.remoteStoreSettings = remoteStoreSettings; this.fileDownloader = new RemoteStoreFileDownloader(shardRouting.shardId(), threadPool, recoverySettings); this.shardMigrationState = getShardMigrationState(indexSettings, seedRemote); + this.discoveryNodes = discoveryNodes; } public ThreadPool getThreadPool() { @@ -506,6 +512,23 @@ public boolean shouldSeedRemoteStore() { return shardMigrationState == REMOTE_MIGRATING_UNSEEDED; } + /** + * To be delegated to {@link ReplicationTracker} so that relevant remote store based + * operations can be ignored during engine migration + *
<p>
+ * Has explicit null checks to ensure that the {@link ReplicationTracker#invariant()} + * checks do not fail during a cluster manager state update when the latest replication group + * calculation is not yet done and the cached replication group details are available + */ + public Function isShardOnRemoteEnabledNode = nodeId -> { + DiscoveryNode node = discoveryNodes.get(nodeId); + if (node != null) { + logger.trace("Node {} has remote_enabled as {}", nodeId, node.isRemoteStoreNode()); + return node.isRemoteStoreNode(); + } + return false; + }; + public boolean isRemoteSeeded() { return shardMigrationState == REMOTE_MIGRATING_SEEDED; } @@ -616,8 +639,10 @@ public void updateShardState( final BiConsumer> primaryReplicaSyncer, final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable + final IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { + this.discoveryNodes = discoveryNodes; final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; @@ -3495,8 +3520,8 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint. */ - assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) - || indexSettings.isRemoteTranslogStoreEnabled() : "supposedly in-sync shard copy received a global checkpoint [" + assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) || indexSettings.isRemoteNode() + : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" @@ -3993,7 +4018,7 @@ private boolean isRemoteStoreEnabled() { } public boolean isRemoteTranslogEnabled() { - return indexSettings() != null && indexSettings().isRemoteTranslogStoreEnabled(); + return indexSettings() != null && (indexSettings().isRemoteTranslogStoreEnabled()); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index ba78c28f3db88..4f68c03913199 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -54,6 +54,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; @@ -1021,7 +1022,8 @@ public IndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -1036,7 +1038,8 @@ public IndexShard createShard( remoteStoreStatsTrackerFactory, repositoriesService, targetNode, - sourceNode + sourceNode, + discoveryNodes ); indexShard.addShardFailureCallback(onShardFailure); 
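// Note: the DiscoveryNodes view handed to createShard above is the same snapshot that backs IndexShard's isShardOnRemoteEnabledNode check, which is how replication and retention lease logic resolves remote-store node attributes while a docrep-to-remote migration is in flight 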
indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 7fb8b172ae352..2c3ffcdd9e0ba 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -680,7 +680,8 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR retentionLeaseSyncer, nodes.getLocalNode(), sourceNode, - remoteStoreStatsTrackerFactory + remoteStoreStatsTrackerFactory, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); @@ -714,7 +715,8 @@ private void updateShard( primaryReplicaSyncer::resync, clusterState.version(), inSyncIds, - indexShardRoutingTable + indexShardRoutingTable, + nodes ); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); @@ -922,7 +924,8 @@ void updateShardState( BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException; } @@ -1040,7 +1043,8 @@ T createShard( RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, - RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + DiscoveryNodes discoveryNodes ) throws IOException; /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java index 0ccb1ac2133cf..96e85154e6248 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java @@ -23,8 +23,7 @@ public static RecoverySourceHandler create( StartRecoveryRequest request, RecoverySettings recoverySettings ) { - boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false - && (shard.isRemoteTranslogEnabled() || shard.isMigratingToRemote()); + boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && request.targetNode().isRemoteStoreNode(); if (isReplicaRecoveryWithRemoteTranslog) { return new RemoteStorePeerRecoverySourceHandler( shard, diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 16311d5d2cfb7..f47b082de3856 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -213,10 +213,17 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener { state().getIndex().setFileDetailsComplete(); // ops-based recoveries don't send the file details state().getTranslog().totalOperations(totalTranslogOps); + // Cleanup remote contents before opening new translog. 
+ // This prevents reading from any old Translog UUIDs during re-seeding + // (situation in which primary fails over to docrep replica and is re-seeded to remote again) + // which might end up causing a TranslogCorruptedException + if (indexShard.shouldSeedRemoteStore()) { + assert indexShard.routingEntry().primary() : "Remote seeding should only be true for primary shard copy"; + indexShard.deleteRemoteStoreContents(); + } indexShard().openEngineAndSkipTranslogRecovery(); // upload to remote store in migration for primary shard - if (indexShard.shouldSeedRemoteStore() && indexShard.routingEntry().primary()) { - indexShard.deleteRemoteStoreContents(); + if (indexShard.shouldSeedRemoteStore()) { // This cleans up remote translog's 0 generation, as we don't want to get that uploaded indexShard.sync(); threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> { indexShard.refresh("remote store migration"); }); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 852003c9f3e4d..657705a8cd725 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -38,7 +38,7 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - if (shard.indexSettings().isSegRepWithRemoteEnabled()) { + if (shard.indexSettings().isRemoteNode()) { return new RemoteStoreReplicationSource(shard); } else { return new PrimaryShardReplicationSource( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 821ae42e31881..4d7d0a8633b5c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -98,7 +98,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.isRemoteTranslogEnabled()) { + if (indexShard.indexSettings().isRemoteNode()) { return ReplicationMode.FULL_REPLICATION; } return super.getReplicationMode(indexShard); @@ -199,6 +199,12 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh Objects.requireNonNull(replica); ActionListener.completeWith(listener, () -> { logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); + // Condition for ensuring that we ignore Segrep checkpoints received on Docrep shard copies. + // This case will hit iff the replica hosting node is not remote enabled and replication type != SEGMENT + if (replica.indexSettings().isRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { + logger.trace("Received segrep checkpoint on a docrep shard copy during an ongoing remote migration. 
NoOp."); + return new ReplicaResult(); + } if (request.getCheckpoint().getShardId().equals(replica.shardId())) { replicationService.onNewCheckpoint(request.getCheckpoint(), replica); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index ef26bc225b0c7..5ca5f53f180be 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -82,6 +82,7 @@ import org.mockito.ArgumentCaptor; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; @@ -332,15 +333,15 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardBeforeCloseAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private TransportVerifyShardBeforeCloseAction createAction() { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java index 09215088bd04b..c9d3a6c4c7605 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/flush/TransportShardFlushActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardFlushActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardFlushAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - 
when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java index 8c4a6c023f9a5..90498d6d35700 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/readonly/TransportVerifyShardIndexBlockActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportVerifyShardIndexBlockActionTests extends OpenSearchTestCas public void testGetReplicationModeWithRemoteTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportVerifyShardIndexBlockAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java index b2eee904bad38..bc0b7e5cf14b2 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/refresh/TransportShardRefreshActionTests.java @@ -20,6 +20,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,14 +29,14 @@ public class TransportShardRefreshActionTests extends OpenSearchTestCase { public void testGetReplicationModeWithRemoteTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardRefreshAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, 
action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index 65b555649b2d0..6331861c3dcb9 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -107,6 +107,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongSupplier; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.not; @@ -1237,14 +1238,14 @@ public void testHandlePrimaryTermValidationRequestSuccess() { public void testGetReplicationModeWithRemoteTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.PRIMARY_TERM_VALIDATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { TransportShardBulkAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index da87a0a967f53..a2fefd6278321 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -84,6 +84,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.opensearch.test.ClusterServiceUtils.setState; import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; @@ -233,14 +234,14 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final TransportResyncReplicationAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git 
a/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java new file mode 100644 index 0000000000000..626c2f74f09c4 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationModeAwareProxyTests.java @@ -0,0 +1,216 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShardTestUtils; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; + +public class ReplicationModeAwareProxyTests extends OpenSearchTestCase { + + /* + Replication action running on the same primary copy from which it originates. + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingCurrentPrimary() { + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + null, + true, + ShardRoutingState.STARTED, + AllocationId.newInitializing("abc") + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode("dummy-node")).build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from failing primary to replica being promoted to primary + Action should run and proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRelocatingPrimary() { + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId relocationTargetId = AllocationId.newTargetRelocation(primaryId); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + null, + true, + ShardRoutingState.INITIALIZING, + relocationTargetId + ); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + "dummy-node-2", + true, + ShardRoutingState.RELOCATING, + primaryId + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + 
DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + randomBoolean() + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to docrep replica during remote store migration + Action should run and proxy should return ReplicationMode.FULL_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingDocrepShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + true, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + false, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeDiscoNode(targetRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.FULL_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote replica during remote store migration + Action should not run and proxy should return ReplicationMode.NO_REPLICATION + */ + public void testDetermineReplicationModeTargetRoutingRemoteShard() { + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(ReplicationMode.NO_REPLICATION, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary to remote enabled replica during remote store migration + with an explicit replication mode specified + Action should run and proxy should return the overridden Replication Mode + */ + public void testDetermineReplicationWithExplicitOverrideTargetRoutingRemoteShard() { + ReplicationMode replicationModeOverride = ReplicationMode.PRIMARY_TERM_VALIDATION; + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = 
TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + replicationModeOverride, + DiscoveryNodes.builder() + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(targetRouting.currentNodeId())) + .add(IndexShardTestUtils.getFakeRemoteEnabledNode(primaryRouting.currentNodeId())) + .build(), + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + false + ); + assertEquals(replicationModeOverride, replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting)); + } + + /* + Replication action originating from remote enabled primary with remote enabled index settings enabled + Action should not query the DiscoveryNodes object + */ + public void testDetermineReplicationWithRemoteIndexSettingsEnabled() { + DiscoveryNodes mockDiscoveryNodes = mock(DiscoveryNodes.class); + ShardRouting primaryRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node", + false, + ShardRoutingState.STARTED + ); + ShardRouting targetRouting = TestShardRouting.newShardRouting( + new ShardId(new Index("test_index", "_na_"), 0), + "dummy-node-2", + true, + ShardRoutingState.STARTED + ); + final ReplicationModeAwareProxy replicationModeAwareProxy = new ReplicationModeAwareProxy( + ReplicationMode.NO_REPLICATION, + mockDiscoveryNodes, + mock(TransportReplicationAction.ReplicasProxy.class), + mock(TransportReplicationAction.ReplicasProxy.class), + true + ); + replicationModeAwareProxy.determineReplicationMode(targetRouting, primaryRouting); + // Verify no interactions with the DiscoveryNodes object + verify(mockDiscoveryNodes, never()).get(anyString()); + } +} diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 6b54623b03164..ec5fc1d19e40d 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.set.Sets; @@ -59,6 +60,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.IndexShardNotStartedException; import org.opensearch.index.shard.IndexShardState; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.ReplicationGroup; import org.opensearch.node.NodeClosedException; import org.opensearch.test.OpenSearchTestCase; @@ -239,7 +241,13 @@ public void testReplicationWithRemoteTranslogEnabled() throws Exception { listener, replicasProxy, 0, - new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy) + new ReplicationModeAwareProxy<>( + ReplicationMode.NO_REPLICATION, + buildRemoteStoreEnabledDiscoveryNodes(routingTable), + replicasProxy, + replicasProxy, + true + ) ); op.execute(); assertTrue("request was not processed on primary", 
request.processedOnPrimary.get());
@@ -304,7 +312,13 @@ public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Ex
             listener,
             replicasProxy,
             0,
-            new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, replicasProxy, replicasProxy)
+            new ReplicationModeAwareProxy<>(
+                ReplicationMode.NO_REPLICATION,
+                buildRemoteStoreEnabledDiscoveryNodes(routingTable),
+                replicasProxy,
+                replicasProxy,
+                true
+            )
         );
         op.execute();
         assertTrue("request was not processed on primary", request.processedOnPrimary.get());
@@ -380,6 +394,144 @@ public void testForceReplicationWithRemoteTranslogEnabled() throws Exception {
         assertEquals(activeIds.size() + initializingIds.size(), shardInfo.getTotal());
     }
 
+    public void testReplicationInDualModeWithDocrepReplica() throws Exception {
+        Set<AllocationId> initializingIds = new HashSet<>();
+        IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing()));
+        Set<AllocationId> activeIds = new HashSet<>();
+        IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing()));
+
+        AllocationId primaryId = activeIds.iterator().next();
+
+        ShardId shardId = new ShardId("test", "_na_", 0);
+        IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId);
+        final ShardRouting primaryShard = newShardRouting(
+            shardId,
+            nodeIdFromAllocationId(primaryId),
+            null,
+            true,
+            ShardRoutingState.STARTED,
+            primaryId
+        );
+        initializingIds.forEach(aId -> {
+            ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId);
+            builder.addShard(routing);
+        });
+        activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> {
+            ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId);
+            builder.addShard(routing);
+        });
+        builder.addShard(primaryShard);
+        IndexShardRoutingTable routingTable = builder.build();
+
+        Set<String> inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet());
+        ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0);
+        List<ShardRouting> replicationTargets = replicationGroup.getReplicationTargets();
+        assertEquals(inSyncAllocationIds.size(), replicationTargets.size());
+        assertTrue(
+            replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds)
+        );
+
+        Request request = new Request(shardId);
+        PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
+        Map<String, Exception> simulatedFailures = new HashMap<>();
+        TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures);
+        TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool);
+        final TestReplicationOperation op = new TestReplicationOperation(
+            request,
+            primary,
+            listener,
+            replicasProxy,
+            0,
+            new ReplicationModeAwareProxy<>(
+                ReplicationMode.NO_REPLICATION,
+                buildDiscoveryNodes(routingTable),
+                replicasProxy,
+                replicasProxy,
+                false
+            )
+        );
+        op.execute();
+        assertTrue("request was not processed on primary", request.processedOnPrimary.get());
+        // During dual replication, except for primary, replication action should be executed on all the replicas
+        assertEquals(activeIds.size() - 1, request.processedOnReplicas.size());
+        assertEquals(0, replicasProxy.failedReplicas.size());
+        assertEquals(0, replicasProxy.markedAsStaleCopies.size());
+        assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get());
+        assertTrue("listener is not marked as done", listener.isDone());
+
+        ShardInfo shardInfo = listener.actionGet().getShardInfo();
+        // All initializing and active shards are set to docrep
+        assertEquals(initializingIds.size() + activeIds.size(), shardInfo.getTotal());
+    }
+
+    public void testReplicationInDualModeWithMixedReplicasSomeInDocrepOthersOnRemote() throws Exception {
+        Set<AllocationId> initializingIds = new HashSet<>();
+        IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing()));
+        Set<AllocationId> activeIds = new HashSet<>();
+        IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing()));
+
+        AllocationId primaryId = activeIds.iterator().next();
+
+        ShardId shardId = new ShardId("test", "_na_", 0);
+        IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId);
+        final ShardRouting primaryShard = newShardRouting(
+            shardId,
+            nodeIdFromAllocationId(primaryId),
+            null,
+            true,
+            ShardRoutingState.STARTED,
+            primaryId
+        );
+        initializingIds.forEach(aId -> {
+            ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId);
+            builder.addShard(routing);
+        });
+        activeIds.stream().filter(aId -> !aId.equals(primaryId)).forEach(aId -> {
+            ShardRouting routing = newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId);
+            builder.addShard(routing);
+        });
+        builder.addShard(primaryShard);
+        IndexShardRoutingTable routingTable = builder.build();
+
+        Set<String> inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet());
+        ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0);
+        List<ShardRouting> replicationTargets = replicationGroup.getReplicationTargets();
+        assertEquals(inSyncAllocationIds.size(), replicationTargets.size());
+        assertTrue(
+            replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds)
+        );
+
+        Request request = new Request(shardId);
+        PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
+        Map<String, Exception> simulatedFailures = new HashMap<>();
+        TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures);
+        TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool);
+        // Generating data nodes in mixed mode wherein some of the allocated replicas
+        // are in docrep nodes whereas others are on remote enabled ones
+        Tuple<Integer, DiscoveryNodes> discoveryNodesDetails = buildMixedModeDiscoveryNodes(routingTable);
+        int docRepNodes = discoveryNodesDetails.v1();
+        final TestReplicationOperation op = new TestReplicationOperation(
+            request,
+            primary,
+            listener,
+            replicasProxy,
+            0,
+            new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION, discoveryNodesDetails.v2(), replicasProxy, replicasProxy, false)
+        );
+        op.execute();
+        assertTrue("request was not processed on primary", request.processedOnPrimary.get());
+        // Only docrep nodes should have the request fanned out to
+        assertEquals(docRepNodes, request.processedOnReplicas.size());
+        assertEquals(0, replicasProxy.failedReplicas.size());
+        assertEquals(0, replicasProxy.markedAsStaleCopies.size());
+        assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get());
+        assertTrue("listener is not marked as done", listener.isDone());
+
+        ShardInfo shardInfo = listener.actionGet().getShardInfo();
+        // Listener should be invoked for initializing Ids, primary and the operations on docrep nodes
+        assertEquals(1 + docRepNodes + initializingIds.size(), shardInfo.getTotal());
+    }
+
     static String nodeIdFromAllocationId(final AllocationId allocationId) {
         return "n-" + allocationId.getId().substring(0, 8);
     }
@@ -816,6 +968,46 @@ private Set<ShardRouting> getExpectedReplicas(ShardId shardId, ClusterState stat
         return expectedReplicas;
     }
 
+    private DiscoveryNodes buildRemoteStoreEnabledDiscoveryNodes(IndexShardRoutingTable routingTable) {
+        DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+        for (ShardRouting shardRouting : routingTable) {
+            builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId()));
+        }
+        return builder.build();
+    }
+
+    private DiscoveryNodes buildDiscoveryNodes(IndexShardRoutingTable routingTable) {
+        DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+        for (ShardRouting shardRouting : routingTable) {
+            if (shardRouting.primary()) {
+                builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId()));
+            } else {
+                builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId()));
+            }
+        }
+        return builder.build();
+    }
+
+    private Tuple<Integer, DiscoveryNodes> buildMixedModeDiscoveryNodes(IndexShardRoutingTable routingTable) {
+        int docrepNodes = 0;
+        DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
+        for (ShardRouting shardRouting : routingTable) {
+            if (shardRouting.primary()) {
+                builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId()));
+            } else {
+                // Only add docrep nodes for allocationIds that are active
+                // since the test case creates the replication group with active allocationIds only
+                if (shardRouting.active() && randomBoolean()) {
+                    builder.add(IndexShardTestUtils.getFakeDiscoNode(shardRouting.currentNodeId()));
+                    docrepNodes += 1;
+                } else {
+                    builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(shardRouting.currentNodeId()));
+                }
+            }
+        }
+        return new Tuple<>(docrepNodes, builder.build());
+    }
+
     public static class Request extends ReplicationRequest<Request> {
         public AtomicBoolean processedOnPrimary = new AtomicBoolean();
         public AtomicBoolean runPostReplicationActionsOnPrimary = new AtomicBoolean();
diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
index dad0fa0efd3ec..4a18778cc0b2b 100644
--- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
+++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java
@@ -78,6 +78,7 @@
 import org.opensearch.core.transport.TransportResponse;
 import org.opensearch.index.IndexNotFoundException;
 import org.opensearch.index.IndexService;
+import org.opensearch.index.remote.RemoteStoreTestsHelper;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.index.shard.IndexShardClosedException;
 import org.opensearch.index.shard.IndexShardState;
@@ -1589,9 +1590,15 @@ private IndexService mockIndexService(final IndexMetadata indexMetadata, Cluster
     @SuppressWarnings("unchecked")
     private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService) {
+        return mockIndexShard(shardId, clusterService, false);
+    }
+
+    @SuppressWarnings("unchecked")
+    private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService, boolean remote) {
         final
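        /* The new 'remote' flag is threaded into the mocked shard's IndexSettings via
           RemoteStoreTestsHelper.createIndexSettings(remote) below, so code under test that
           reads indexShard.indexSettings() sees a remote-backed index when remote is true. */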
IndexShard indexShard = mock(IndexShard.class); when(indexShard.shardId()).thenReturn(shardId); when(indexShard.state()).thenReturn(IndexShardState.STARTED); + when(indexShard.indexSettings()).thenReturn(RemoteStoreTestsHelper.createIndexSettings(remote)); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; if (isPrimaryMode.get()) { diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java index e072d3037caad..043b4493e8989 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreTestsHelper.java @@ -10,6 +10,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; @@ -37,4 +38,22 @@ static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) when(indexShard.store()).thenReturn(store); return indexShard; } + + public static IndexSettings createIndexSettings(boolean remote) { + return createIndexSettings(remote, Settings.EMPTY); + } + + public static IndexSettings createIndexSettings(boolean remote, Settings settings) { + IndexSettings indexSettings; + if (remote) { + Settings nodeSettings = Settings.builder() + .put("node.name", "xyz") + .put("node.attr.remote_store.translog.repository", "seg_repo") + .build(); + indexSettings = IndexSettingsModule.newIndexSettings(new Index("test_index", "_na_"), settings, nodeSettings); + } else { + indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + } + return indexSettings; + } } diff --git a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java index 8c59e92a3fe8a..904c9a70e61e0 100644 --- a/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RetentionLeasesReplicationTests.java @@ -45,6 +45,7 @@ import org.opensearch.index.seqno.RetentionLeaseUtils; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.test.VersionUtils; import java.util.ArrayList; @@ -182,7 +183,8 @@ public void testTurnOffTranslogRetentionAfterAllShardStarted() throws Exception null, 1L, group.getPrimary().getReplicationGroup().getInSyncAllocationIds(), - group.getPrimary().getReplicationGroup().getRoutingTable() + group.getPrimary().getReplicationGroup().getRoutingTable(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); } group.syncGlobalCheckpoint(); diff --git a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java index 8363ea3757a2b..a27f3476888eb 100644 --- a/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/GlobalCheckpointSyncActionTests.java @@ -54,6 +54,7 @@ import java.util.Collections; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import 
static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -110,6 +111,7 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); final Translog.Durability durability = randomFrom(Translog.Durability.ASYNC, Translog.Durability.REQUEST); when(indexShard.getTranslogDurability()).thenReturn(durability); @@ -158,14 +160,14 @@ public void testTranslogSyncAfterGlobalCheckpointSync() throws Exception { public void testGetReplicationModeWithRemoteTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final GlobalCheckpointSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } @@ -191,6 +193,7 @@ public void testMayBeSyncTranslogWithRemoteTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard, never()).sync(); @@ -205,6 +208,7 @@ public void testMayBeSyncTranslogWithLocalTranslog() throws Exception { when(indexShard.getLastKnownGlobalCheckpoint()).thenReturn(globalCheckpoint); when(indexShard.getLastSyncedGlobalCheckpoint()).thenReturn(globalCheckpoint - 1); when(indexShard.getTranslogDurability()).thenReturn(Translog.Durability.REQUEST); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); action.shardOperationOnPrimary(primaryRequest, indexShard, ActionTestUtils.assertNoFailureListener(r -> {})); verify(indexShard).sync(); diff --git a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java index ca80c7b9c4884..7a9f1d7baa12e 100644 --- a/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/PeerRecoveryRetentionLeaseExpiryTests.java @@ -93,7 +93,8 @@ public void setUpReplicationTracker() throws InterruptedException { value -> {}, currentTimeMillis::get, (leases, listener) -> {}, - () -> safeCommitInfo + () -> safeCommitInfo, + sId -> false ); replicationTracker.updateFromClusterManager( 1L, diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 3cd60ac973709..fdbe89422a2aa 100644 --- 
a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -84,7 +84,8 @@ public void testAddOrRenewRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -132,7 +133,8 @@ public void testAddDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -164,7 +166,8 @@ public void testRenewNotFoundRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -202,7 +205,8 @@ public void testAddRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -241,7 +245,8 @@ public void testRemoveRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -305,7 +310,8 @@ public void testCloneRetentionLease() { assertTrue(synced.compareAndSet(false, true)); listener.onResponse(new ReplicationResponse()); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTrackerRef.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -351,7 +357,8 @@ public void testCloneNonexistentRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -380,7 +387,8 @@ public void testCloneDuplicateRetentionLease() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -413,7 +421,8 @@ public void testRemoveNotFound() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -451,7 +460,8 @@ public void testRemoveRetentionLeaseCausesRetentionLeaseSync() { equalTo(retainingSequenceNumbers) ); }, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); reference.set(replicationTracker); replicationTracker.updateFromClusterManager( @@ -504,7 +514,8 @@ private void runExpirationTest(final boolean primaryMode) { value -> {}, currentTimeMillis::get, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( 
randomNonNegativeLong(), @@ -583,7 +594,8 @@ public void testReplicaIgnoresOlderRetentionLeasesVersion() { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -636,7 +648,8 @@ public void testLoadAndPersistRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -671,7 +684,8 @@ public void testUnnecessaryPersistenceOfRetentionLeases() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -724,7 +738,8 @@ public void testPersistRetentionLeasesUnderConcurrency() throws IOException { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), @@ -788,7 +803,8 @@ public void testRenewLeaseWithLowerRetainingSequenceNumber() throws Exception { value -> {}, () -> 0L, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); replicationTracker.updateFromClusterManager( randomNonNegativeLong(), diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java index e61d27695a5e5..daeefeff59c94 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java @@ -40,11 +40,13 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.SafeCommitInfo; +import org.opensearch.index.remote.RemoteStoreTestsHelper; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import java.util.Collections; import java.util.Set; +import java.util.function.Function; import java.util.function.LongConsumer; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -57,18 +59,20 @@ ReplicationTracker newTracker( final AllocationId allocationId, final LongConsumer updatedGlobalCheckpoint, final LongSupplier currentTimeMillisSupplier, - final Settings settings + final Settings settings, + final boolean remote ) { return new ReplicationTracker( new ShardId("test", "_na_", 0), allocationId.getId(), - IndexSettingsModule.newIndexSettings("test", settings), + remote ? RemoteStoreTestsHelper.createIndexSettings(true, settings) : IndexSettingsModule.newIndexSettings("test", settings), randomNonNegativeLong(), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, currentTimeMillisSupplier, (leases, listener) -> {}, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + remote ? 
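                /* REMOTE_DISCOVERY_NODE and NON_REMOTE_DISCOVERY_NODE, declared further down in
                   this test case, are Function<String, Boolean>-style constants (assumed from
                   their lambda bodies) mapping a shard id to whether its node is remote-backed. */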
REMOTE_DISCOVERY_NODE : NON_REMOTE_DISCOVERY_NODE ); } @@ -80,8 +84,21 @@ ReplicationTracker newTracker( return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, Settings.EMPTY); } + ReplicationTracker newTracker( + final AllocationId allocationId, + final LongConsumer updatedGlobalCheckpoint, + final LongSupplier currentTimeMillisSupplier, + final Settings settings + ) { + return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, settings, false); + } + static final Supplier OPS_BASED_RECOVERY_ALWAYS_REASONABLE = () -> SafeCommitInfo.EMPTY; + static final Function NON_REMOTE_DISCOVERY_NODE = shardId -> false; + + static final Function REMOTE_DISCOVERY_NODE = shardId -> true; + static String nodeIdFromAllocationId(final AllocationId allocationId) { return "n-" + allocationId.getId().substring(0, 8); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 7971591e82bab..233a99cbe4a73 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -446,6 +446,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings, boolean remote) { + return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings, remote); + } + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings) { return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings); } @@ -759,7 +763,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new ReplicationTracker( shardId, @@ -770,7 +775,8 @@ public void testPrimaryContextHandoff() throws IOException { onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + NON_REMOTE_DISCOVERY_NODE ); Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); @@ -1300,7 +1306,7 @@ public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1378,7 +1384,7 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); @@ -1476,7 +1482,7 @@ public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() 
throws Excep .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); tracker.activatePrimaryMode(localCheckpoint); @@ -1504,7 +1510,7 @@ public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); List initializingRandomSubset = randomSubsetOf(initializing.keySet()); @@ -1537,7 +1543,7 @@ public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach( @@ -1606,8 +1612,8 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTrans .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); - tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, active, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -1655,7 +1661,7 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") .build(); - final ReplicationTracker tracker = newTracker(primaryId, settings); + final ReplicationTracker tracker = newTracker(primaryId, settings, true); tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); @@ -2080,7 +2086,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); ReplicationTracker newPrimary = new 
ReplicationTracker( shardId, @@ -2091,7 +2098,8 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept onUpdate, () -> 0L, onNewRetentionLease, - OPS_BASED_RECOVERY_ALWAYS_REASONABLE + OPS_BASED_RECOVERY_ALWAYS_REASONABLE, + REMOTE_DISCOVERY_NODE ); Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java index ed04d9a20f18e..d5d7163b66698 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseBackgroundSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,14 +216,14 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseBackgroundSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java index 63a9ac2f2e8ec..7610b8bc39296 100644 --- a/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -60,6 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.emptyMap; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; @@ -215,15 +216,15 @@ public void testBlocks() { public void testGetReplicationModeWithRemoteTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.NO_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final RetentionLeaseSyncAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); - assertEquals(ReplicationMode.NO_REPLICATION, 
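            /* The assertion expects NO_REPLICATION: with a remote translog store the background
               retention-lease sync is not fanned out to replicas. The rationale is inferred;
               the expected value itself is the patch's own. */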
action.getReplicationMode(indexShard)); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } private RetentionLeaseSyncAction createAction() { diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 537bfcf8f8a6b..e5bfa8caee79a 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -697,7 +697,8 @@ public void testPrimaryPromotionRollsGeneration() throws Exception { (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); /* @@ -764,7 +765,8 @@ public void testOperationPermitsOnPrimaryShards() throws Exception { }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build() + new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(primaryRouting) ); latch.await(); assertThat(indexShard.getActiveOperationsCount(), is(oneOf(0, IndexShard.OPERATIONS_BLOCKED))); @@ -1446,7 +1448,8 @@ public void onFailure(Exception e) { (s, r) -> resyncLatch.countDown(), 1L, Collections.singleton(newRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build() + new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(newRouting) ); resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); @@ -3284,7 +3287,7 @@ public void testRecoverFromTranslog() throws IOException { Translog.Snapshot snapshot = TestTranslog.newSnapshotFromOperations(operations); primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); @@ -4029,15 +4032,19 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { if (isPrimary) { newShard.markAsRecovering( "store", - new RecoveryState(newShard.routingEntry(), getFakeDiscoNode(newShard.routingEntry().currentNodeId()), null) + new RecoveryState( + newShard.routingEntry(), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + null + ) ); } else { newShard.markAsRecovering( "peer", new RecoveryState( newShard.routingEntry(), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()), - getFakeDiscoNode(newShard.routingEntry().currentNodeId()) + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()), + IndexShardTestUtils.getFakeDiscoNode(newShard.routingEntry().currentNodeId()) ) ); } diff --git a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java index b1bcaac2c1947..09903a8b44cb5 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/PrimaryReplicaSyncerTests.java @@ -111,7 +111,8 @@ public void testSyncerSendsOffCorrectDocuments() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint()); @@ -190,7 +191,8 @@ public void testSyncerOnClosingShard() throws Exception { null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build() + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), + IndexShardTestUtils.getFakeDiscoveryNodes(shard.routingEntry()) ); CountDownLatch syncCalledLatch = new CountDownLatch(1); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 0e16e81b1bb70..0428bdf0655b0 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingHelper; @@ -164,7 +165,8 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem null, null, localNode, - null + null, + DiscoveryNodes.builder().add(localNode).build() ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index c455101ff4549..0490228a5cc16 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -35,6 +35,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -264,7 +265,8 @@ public MockIndexShard createShard( final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, final DiscoveryNode sourceNode, - final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory + final RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + final DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); @@ -387,7 
+389,8 @@ public void updateShardState( BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable + IndexShardRoutingTable routingTable, + DiscoveryNodes discoveryNodes ) throws IOException { failRandomly(); assertThat(this.shardId(), equalTo(shardRouting.shardId())); diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java index 4fbae4b0d53ca..ded174fb98eef 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -38,6 +38,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.test.NodeRoles; @@ -65,8 +66,8 @@ public void testDuplicateRecoveries() throws IOException { StartRecoveryRequest startRecoveryRequest = new StartRecoveryRequest( primary.shardId(), randomAlphaOfLength(10), - getFakeDiscoNode("source"), - getFakeDiscoNode("target"), + IndexShardTestUtils.getFakeDiscoNode("source"), + IndexShardTestUtils.getFakeDiscoNode("target"), Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 1e6cc43703672..a8e5a02011538 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -56,6 +56,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; @@ -92,8 +93,8 @@ public void testWriteFileChunksConcurrently() throws Exception { mdFiles.add(md); } final IndexShard targetShard = newShard(false); - final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); + final DiscoveryNode pNode = IndexShardTestUtils.getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); + final DiscoveryNode rNode = IndexShardTestUtils.getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode)); final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null, threadPool); final PlainActionFuture receiveFileInfoFuture = new PlainActionFuture<>(); diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java index 2cf006176022d..352f827c74cb2 100644 --- a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java +++ 
b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -15,6 +15,7 @@ import org.opensearch.action.support.replication.ReplicationMode; import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; @@ -35,9 +36,12 @@ import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; +import static org.opensearch.index.remote.RemoteStoreTestsHelper.createIndexSettings; import static org.opensearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -128,7 +132,9 @@ public void testPublishCheckpointActionOnReplica() { final ShardId shardId = new ShardId(index, id); when(indexShard.shardId()).thenReturn(shardId); - + when(indexShard.indexSettings()).thenReturn( + createIndexSettings(false, Settings.builder().put(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING.getKey(), "SEGMENT").build()) + ); final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); final PublishCheckpointAction action = new PublishCheckpointAction( @@ -160,17 +166,46 @@ public void testPublishCheckpointActionOnReplica() { } + public void testPublishCheckpointActionOnDocrepReplicaDuringMigration() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); + final SegmentReplicationTargetService mockTargetService = mock(SegmentReplicationTargetService.class); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + mockTargetService + ); + // no interaction with SegmentReplicationTargetService object + verify(mockTargetService, never()).onNewCheckpoint(any(), any()); + } + public void testGetReplicationModeWithRemoteTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(true); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(true)); assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } public void testGetReplicationModeWithLocalTranslog() { final PublishCheckpointAction action = createAction(); final IndexShard indexShard = mock(IndexShard.class); - when(indexShard.isRemoteTranslogEnabled()).thenReturn(false); + when(indexShard.indexSettings()).thenReturn(createIndexSettings(false)); 
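+        // Note: both replication-mode tests in this class expect FULL_REPLICATION, with a remote
+        // as well as a local translog (see the assertions above and below); checkpoint publishing
+        // fans out to replicas regardless of translog locality, unlike the retention-lease and
+        // global-checkpoint sync actions, whose remote-translog tests expect NO_REPLICATION.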
assertEquals(ReplicationMode.FULL_REPLICATION, action.getReplicationMode(indexShard)); } diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 43289a7c89524..1cb5501810c5d 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -898,7 +898,8 @@ public EngineConfig config( update -> {}, () -> 0L, (leases, listener) -> listener.onResponse(new ReplicationResponse()), - () -> SafeCommitInfo.EMPTY + () -> SafeCommitInfo.EMPTY, + sId -> false ); globalCheckpointSupplier = replicationTracker; retentionLeasesSupplier = replicationTracker::getRetentionLeases; diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index e6e20ce8f8566..3226035bba97b 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -64,6 +64,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; @@ -96,6 +97,7 @@ import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.IndexShardTestUtils; import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.translog.Translog; @@ -340,6 +342,23 @@ public synchronized void startAll() throws IOException { startReplicas(replicas.size()); } + public synchronized DiscoveryNodes generateFakeDiscoveryNodes() { + DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); + if (primary.indexSettings() != null && primary.indexSettings().isRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId())); + } + for (IndexShard replica : replicas) { + if (replica.indexSettings() != null && replica.indexSettings().isRemoteNode()) { + builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId())); + } else { + builder.add(IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId())); + } + } + return builder.build(); + } + public synchronized int startReplicas(int numOfReplicasToStart) throws IOException { if (primary.routingEntry().initializing()) { startPrimary(); @@ -371,7 +390,8 @@ public void startPrimary() throws IOException { null, currentClusterStateVersion.incrementAndGet(), activeIds, - routingTable + routingTable, + generateFakeDiscoveryNodes() ); for (final IndexShard replica : replicas) { recoverReplica(replica); @@ -492,7 +512,8 @@ public synchronized void promoteReplicaToPrimary( primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable + 
routingTable, + generateFakeDiscoveryNodes() ); } @@ -638,14 +659,16 @@ public void syncGlobalCheckpoint() { } private void updateAllocationIDsOnPrimary() throws IOException { - primary.updateShardState( primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), activeIds(), - routingTable(Function.identity()) + routingTable(Function.identity()), + primary.indexSettings().isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable(Function.identity()).getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable(Function.identity()).getShards()) ); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 80d16a8243634..b2ece9c813802 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -619,12 +620,14 @@ protected IndexShard newShard( IndexingOperationListener... listeners ) throws IOException { Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); + DiscoveryNodes discoveryNodes = IndexShardTestUtils.getFakeDiscoveryNodes(routing); // To simulate that the node is remote backed if (indexMetadata.getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) == "true") { nodeSettings = Settings.builder() .put("node.name", routing.currentNodeId()) .put("node.attr.remote_store.translog.repository", "seg_repo") .build(); + discoveryNodes = DiscoveryNodes.builder().add(IndexShardTestUtils.getFakeRemoteEnabledNode(routing.currentNodeId())).build(); } final IndexSettings indexSettings = new IndexSettings(indexMetadata, nodeSettings); final IndexShard indexShard; @@ -712,7 +715,8 @@ protected IndexShard newShard( "dummy-node", DefaultRecoverySettings.INSTANCE, DefaultRemoteStoreSettings.INSTANCE, - false + false, + discoveryNodes ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); if (remoteStoreStatsTrackerFactory != null) { @@ -986,7 +990,7 @@ protected void closeShards(Iterable shards) throws IOException { protected void recoverShardFromStore(IndexShard primary) throws IOException { primary.markAsRecovering( "store", - new RecoveryState(primary.routingEntry(), getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) + new RecoveryState(primary.routingEntry(), IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()), null) ); recoverFromStore(primary); updateRoutingEntry(primary, ShardRoutingHelper.moveToStarted(primary.routingEntry())); @@ -1003,7 +1007,19 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + DiscoveryNodes.builder() + .add( + new DiscoveryNode( + shardRouting.currentNodeId(), + shardRouting.currentNodeId(), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + 
) ) .build() ); } @@ -1017,17 +1033,6 @@ protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) th } } - protected DiscoveryNode getFakeDiscoNode(String id) { - return new DiscoveryNode( - id, - id, - buildNewFakeTransportAddress(), - Collections.emptyMap(), - DiscoveryNodeRole.BUILT_IN_ROLES, - Version.CURRENT - ); - } - protected void recoverReplica(IndexShard replica, IndexShard primary, boolean startReplica) throws IOException { recoverReplica(replica, primary, startReplica, getReplicationFunc(replica)); } @@ -1104,7 +1109,7 @@ protected void recoverReplica( * @param targetSupplier supplies an instance of {@link RecoveryTarget} * @param markAsRecovering set to {@code false} if the replica is marked as recovering */ - protected final void recoverUnstartedReplica( + public final void recoverUnstartedReplica( final IndexShard replica, final IndexShard primary, final BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier, @@ -1113,8 +1118,18 @@ protected final void recoverUnstartedReplica( final IndexShardRoutingTable routingTable, final Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction ) throws IOException { - final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId()); - final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId()); + final DiscoveryNode pNode; + final DiscoveryNode rNode; + if (primary.isRemoteTranslogEnabled()) { + pNode = IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId()); + } else { + pNode = IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId()); + } + if (replica.isRemoteTranslogEnabled()) { + rNode = IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId()); + } else { + rNode = IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId()); + } if (markAsRecovering) { replica.markAsRecovering("remote", new RecoveryState(replica.routingEntry(), pNode, rNode)); } else { @@ -1155,7 +1170,10 @@ protected final void recoverUnstartedReplica( null, currentClusterStateVersion.incrementAndGet(), inSyncIds, - routingTable + routingTable, + primary.isRemoteTranslogEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.getShards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.getShards()) ); try { PlainActionFuture<RecoveryResponse> future = new PlainActionFuture<>(); @@ -1189,7 +1207,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + primary.indexSettings.isRemoteTranslogStoreEnabled() + ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); replica.updateShardState( replica.routingEntry().moveToStarted(), @@ -1197,7 +1218,10 @@ protected void startReplicaAfterRecovery( null, currentClusterStateVersion.get(), inSyncIdsWithReplica, - newRoutingTable + newRoutingTable, + replica.indexSettings.isRemoteTranslogStoreEnabled() + ? 
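+ /* remote-backed shard copies need fake DiscoveryNodes carrying remote store attributes; see IndexShardTestUtils below */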
IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.shards()) + : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.shards()) ); } @@ -1226,7 +1250,8 @@ protected void promoteReplica(IndexShard replica, Set<String> inSyncIds, IndexSh ), currentClusterStateVersion.incrementAndGet(), inSyncIds, - newRoutingTable + newRoutingTable, + IndexShardTestUtils.getFakeDiscoveryNodes(routingEntry) ); } @@ -1370,7 +1395,7 @@ protected void recoverShardFromSnapshot(final IndexShard shard, final Snapshot s final Version version = Version.CURRENT; final ShardId shardId = shard.shardId(); final IndexId indexId = new IndexId(shardId.getIndex().getName(), shardId.getIndex().getUUID()); - final DiscoveryNode node = getFakeDiscoNode(shard.routingEntry().currentNodeId()); + final DiscoveryNode node = IndexShardTestUtils.getFakeDiscoNode(shard.routingEntry().currentNodeId()); final RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java new file mode 100644 index 0000000000000..d3a4a95c3bdef --- /dev/null +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestUtils.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class IndexShardTestUtils { + public static DiscoveryNode getFakeDiscoNode(String id) { + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + Collections.emptyMap(), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNode getFakeRemoteEnabledNode(String id) { + Map<String, String> remoteNodeAttributes = new HashMap<>(); + remoteNodeAttributes.put(RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, "test-repo"); + return new DiscoveryNode( + id, + id, + IndexShardTestCase.buildNewFakeTransportAddress(), + remoteNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(List<ShardRouting> shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeDiscoNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeRemoteEnabledDiscoveryNodes(List<ShardRouting> shardRoutings) { + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + for (ShardRouting routing : shardRoutings) { + builder.add(getFakeRemoteEnabledNode(routing.currentNodeId())); + } + return builder.build(); + } + + public static DiscoveryNodes getFakeDiscoveryNodes(ShardRouting shardRouting) { + return DiscoveryNodes.builder().add(getFakeDiscoNode(shardRouting.currentNodeId())).build(); + } +}
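A minimal usage sketch for these helpers (illustrative only, not part of the patch; `shard` and `routingTable` stand in for whatever test fixtures are in scope):

    DiscoveryNodes nodes = shard.indexSettings().isRemoteTranslogStoreEnabled()
        ? IndexShardTestUtils.getFakeRemoteEnabledDiscoveryNodes(routingTable.getShards()) // fake nodes advertising a remote segment repository attribute
        : IndexShardTestUtils.getFakeDiscoveryNodes(routingTable.getShards());            // plain fake transport nodes
    // both variants build one DiscoveryNode per shard routing, keyed by currentNodeId()

From f33db50069d328d5a3c30d867f59eded0d12a320 Mon Sep 17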
00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Tue, 2 Apr 2024 18:22:14 +0530 Subject: [PATCH 34/56] Fix build due to multiple commits to same file causing compilation failure (#13019) Signed-off-by: Gaurav Bafna --- .../org/opensearch/action/bulk/TransportShardBulkAction.java | 2 +- .../support/replication/TransportReplicationAction.java | 4 ++-- .../opensearch/index/seqno/GlobalCheckpointSyncAction.java | 2 +- .../java/org/opensearch/index/seqno/ReplicationTracker.java | 3 ++- .../org/opensearch/index/shard/CheckpointRefreshListener.java | 2 +- .../src/main/java/org/opensearch/index/shard/IndexShard.java | 4 ++-- .../indices/replication/SegmentReplicationSourceFactory.java | 2 +- .../replication/checkpoint/PublishCheckpointAction.java | 4 ++-- .../replication/OpenSearchIndexLevelReplicationTestCase.java | 4 ++-- 9 files changed, 14 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index c1d13128c18b1..fdba8a42c0170 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -442,7 +442,7 @@ protected long primaryOperationSize(BulkShardRequest request) { @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.PRIMARY_TERM_VALIDATION; } return super.getReplicationMode(indexShard); diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 8d86128e36441..49a96603f6802 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -356,7 +356,7 @@ public void performOn( * @return the overridden replication mode. */ public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.NO_REPLICATION; } return ReplicationMode.FULL_REPLICATION; @@ -642,7 +642,7 @@ public void handleException(TransportException exp) { primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, retryTimeout, - indexShard.indexSettings().isRemoteNode() + indexShard.indexSettings().isAssignedOnRemoteNode() ? 
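/* a shard assigned on a remote-store node routes replica operations through the mode-aware proxy, which applies the overridden getReplicationMode(indexShard) above */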
new ReplicationModeAwareProxy<>( getReplicationMode(indexShard), clusterState.getNodes(), diff --git a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java index 0c167d6d80b5c..c6a1f5f27a875 100644 --- a/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java +++ b/server/src/main/java/org/opensearch/index/seqno/GlobalCheckpointSyncAction.java @@ -135,7 +135,7 @@ protected void shardOperationOnReplica(Request shardRequest, IndexShard replica, private void maybeSyncTranslog(final IndexShard indexShard) throws IOException { if (indexShard.getTranslogDurability() == Translog.Durability.REQUEST && indexShard.getLastSyncedGlobalCheckpoint() < indexShard.getLastKnownGlobalCheckpoint() - && indexShard.indexSettings().isRemoteNode() == false) { + && indexShard.indexSettings().isAssignedOnRemoteNode() == false) { indexShard.sync(); } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index b2eb2f03486ac..599c54f293e89 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1378,7 +1378,8 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) || indexSettings.isRemoteNode()) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) + || indexSettings.isAssignedOnRemoteNode()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java index b47025d75282c..7f0806059155a 100644 --- a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -43,7 +43,7 @@ protected boolean performAfterRefreshWithPermit(boolean didRefresh) { if (didRefresh && shard.state() == IndexShardState.STARTED && shard.getReplicationTracker().isPrimaryMode() - && shard.indexSettings.isRemoteNode() == false) { + && shard.indexSettings.isAssignedOnRemoteNode() == false) { publisher.publish(shard, shard.getLatestReplicationCheckpoint()); } return true; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 484083a5b1260..be9a7872cd89c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3520,8 +3520,8 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * When remote translog is enabled for an index, replication operation is limited to primary term validation and does not * update local checkpoint at replica, so the local checkpoint at replica can be less than globalCheckpoint. 
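* For example, a copy that only participates in primary term validation never advances its local checkpoint, so receiving a higher global checkpoint there is expected rather than an error.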
*/ - assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) || indexSettings.isRemoteNode() - : "supposedly in-sync shard copy received a global checkpoint [" + assert (state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED) + || indexSettings.isAssignedOnRemoteNode() : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 657705a8cd725..81eb38757aebe 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -38,7 +38,7 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - if (shard.indexSettings().isRemoteNode()) { + if (shard.indexSettings().isAssignedOnRemoteNode()) { return new RemoteStoreReplicationSource(shard); } else { return new PrimaryShardReplicationSource( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index 4d7d0a8633b5c..8f39aa194b06c 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -98,7 +98,7 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList @Override public ReplicationMode getReplicationMode(IndexShard indexShard) { - if (indexShard.indexSettings().isRemoteNode()) { + if (indexShard.indexSettings().isAssignedOnRemoteNode()) { return ReplicationMode.FULL_REPLICATION; } return super.getReplicationMode(indexShard); @@ -201,7 +201,7 @@ protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexSh logger.trace(() -> new ParameterizedMessage("Checkpoint {} received on replica {}", request, replica.shardId())); // Condition for ensuring that we ignore Segrep checkpoints received on Docrep shard copies. // This case will hit iff the replica hosting node is not remote enabled and replication type != SEGMENT - if (replica.indexSettings().isRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { + if (replica.indexSettings().isAssignedOnRemoteNode() == false && replica.indexSettings().isSegRepLocalEnabled() == false) { logger.trace("Received segrep checkpoint on a docrep shard copy during an ongoing remote migration. 
NoOp."); return new ReplicaResult(); } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 3226035bba97b..a5dc13c334513 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -344,13 +344,13 @@ public synchronized void startAll() throws IOException { public synchronized DiscoveryNodes generateFakeDiscoveryNodes() { DiscoveryNodes.Builder builder = new DiscoveryNodes.Builder(); - if (primary.indexSettings() != null && primary.indexSettings().isRemoteNode()) { + if (primary.indexSettings() != null && primary.indexSettings().isAssignedOnRemoteNode()) { builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(primary.routingEntry().currentNodeId())); } else { builder.add(IndexShardTestUtils.getFakeDiscoNode(primary.routingEntry().currentNodeId())); } for (IndexShard replica : replicas) { - if (replica.indexSettings() != null && replica.indexSettings().isRemoteNode()) { + if (replica.indexSettings() != null && replica.indexSettings().isAssignedOnRemoteNode()) { builder.add(IndexShardTestUtils.getFakeRemoteEnabledNode(replica.routingEntry().currentNodeId())); } else { builder.add(IndexShardTestUtils.getFakeDiscoNode(replica.routingEntry().currentNodeId())); From a6bbc096f77b65f1b1d4947a88e58f6dc228644a Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Tue, 2 Apr 2024 07:27:59 -0700 Subject: [PATCH 35/56] Add more identifiers to gradle-check job webhook payload (#12938) * Add more identifiers to gradle-check job webhook payload Signed-off-by: Rishabh Singh * Update .github/workflows/gradle-check.yml Co-authored-by: Peter Nied Signed-off-by: Rishabh Singh * Add more identifiers to gradle-check job webhook payload Signed-off-by: Rishabh Singh --------- Signed-off-by: Rishabh Singh Signed-off-by: Rishabh Singh Co-authored-by: Peter Nied --- .github/workflows/gradle-check.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 1f5c187c28e7d..e38aafec0521f 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -30,11 +30,15 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | + echo "event_name=pull_request_target" >> $GITHUB_ENV + echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --raw-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -42,12 +46,15 @@ jobs: repo_url="https://github.com/opensearch-project/OpenSearch" 
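# capture the commit id and branch so push events populate the same webhook fields as pull requests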
ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) + echo "event_name=push" >> $GITHUB_ENV echo "branch_name=$branch_name" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV echo "pr_number=Null" >> $GITHUB_ENV + echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV + echo "pr_or_commit_description=$(jq --raw-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 From 1beec654eb8884c65f1ed8441fb61867a9bfb96e Mon Sep 17 00:00:00 2001 From: "Daniel (dB.) Doubrovkine" Date: Tue, 2 Apr 2024 11:13:50 -0400 Subject: [PATCH 36/56] Revert "Add more identifiers to gradle-check job webhook payload (#12938)" (#13027) This reverts commit a6bbc096f77b65f1b1d4947a88e58f6dc228644a. Signed-off-by: dblock --- .github/workflows/gradle-check.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index e38aafec0521f..1f5c187c28e7d 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -30,15 +30,11 @@ jobs: - name: Setup environment variables (PR) if: github.event_name == 'pull_request_target' run: | - echo "event_name=pull_request_target" >> $GITHUB_ENV - echo "branch_name=$(jq --raw-output .pull_request.base.ref $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_owner=$(jq --raw-output .pull_request.user.login $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_or_commit_description=$(jq --raw-output .pull_request.body $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Setup environment variables (Push) if: github.event_name == 'push' @@ -46,15 +42,12 @@ jobs: repo_url="https://github.com/opensearch-project/OpenSearch" ref_id=$(git rev-parse HEAD) branch_name=$(git rev-parse --abbrev-ref HEAD) - echo "event_name=push" >> $GITHUB_ENV echo "branch_name=$branch_name" >> $GITHUB_ENV echo "pr_from_sha=$ref_id" >> $GITHUB_ENV echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV echo "pr_number=Null" >> $GITHUB_ENV - echo "pr_owner=$(jq --raw-output '.commits[0].author.username' $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - echo "pr_or_commit_description=$(jq --raw-output .head_commit.message $GITHUB_EVENT_PATH)" >> $GITHUB_ENV - name: Checkout opensearch-build repo uses: actions/checkout@v4 From 9ba8b4f4151f8a95659d09ec2d62b81b2da58231 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 2 Apr 2024 12:49:44 -0400 Subject: [PATCH 37/56] Fix detect-breaking-change Github action configuration (#13030) Signed-off-by: Andriy Redko --- .github/workflows/detect-breaking-change.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/detect-breaking-change.yml 
b/.github/workflows/detect-breaking-change.yml index fec605c58f9c7..1913d070e8c24 100644 --- a/.github/workflows/detect-breaking-change.yml +++ b/.github/workflows/detect-breaking-change.yml @@ -1,5 +1,7 @@ name: "Detect Breaking Changes" -on: [push, pull_request] +on: + pull_request + jobs: detect-breaking-change: runs-on: ubuntu-latest @@ -11,6 +13,7 @@ jobs: java-version: 21 - uses: gradle/gradle-build-action@v3 with: + cache-disabled: true arguments: japicmp gradle-version: 8.7 build-root-directory: server From 8ca3e6ad51d73b6b89cb88eca8f4c759f708e59b Mon Sep 17 00:00:00 2001 From: Mohammad Qureshi <47198598+qreshi@users.noreply.github.com> Date: Tue, 2 Apr 2024 11:47:47 -0700 Subject: [PATCH 38/56] Add DerivedFieldMapper and support parsing it in mappings (#12569) Adds a DerivedFieldMapper to support the Derived Fields feature enhancement as well as updating the mapper parsing logic to recognize and currently parse derived fields in the mappings. --------- Signed-off-by: Mohammad Qureshi Signed-off-by: Rishabh Maurya Co-authored-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../index/mapper/DerivedFieldMapper.java | 129 ++++++++ .../opensearch/index/mapper/ObjectMapper.java | 85 +++++- .../index/mapper/ParametrizedFieldMapper.java | 6 +- .../org/opensearch/indices/IndicesModule.java | 2 + .../mapper/DerivedFieldMapperQueryTests.java | 279 ++++++++++++++++++ .../index/mapper/DerivedFieldMapperTests.java | 117 ++++++++ .../index/mapper/ObjectMapperTests.java | 47 +++ .../index/mapper/MapperServiceTestCase.java | 21 +- .../aggregations/AggregatorTestCase.java | 2 + 10 files changed, 686 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperQueryTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/DerivedFieldMapperTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 5371a8372c56d..a83f93e358f53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow setting KEYSTORE_PASSWORD through env variable ([#12865](https://github.com/opensearch-project/OpenSearch/pull/12865)) - [Concurrent Segment Search] Perform buildAggregation concurrently and support Composite Aggregations ([#12697](https://github.com/opensearch-project/OpenSearch/pull/12697)) - [Concurrent Segment Search] Disable concurrent segment search for system indices and throttled requests ([#12954](https://github.com/opensearch-project/OpenSearch/pull/12954)) +- Derived fields support to derive field values at query time without indexing ([#12569](https://github.com/opensearch-project/OpenSearch/pull/12569)) - Detect breaking changes on pull requests ([#9044](https://github.com/opensearch-project/OpenSearch/pull/9044)) - Add cluster primary balance contraint for rebalancing with buffer ([#12656](https://github.com/opensearch-project/OpenSearch/pull/12656)) diff --git a/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java new file mode 100644 index 0000000000000..b448487a4f810 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/DerivedFieldMapper.java @@ -0,0 +1,129 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible 
open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.IndexableField; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.script.Script; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; + +/** + * A field mapper for derived fields + * + * @opensearch.internal + */ +public class DerivedFieldMapper extends ParametrizedFieldMapper { + + public static final String CONTENT_TYPE = "derived"; + + private static DerivedFieldMapper toType(FieldMapper in) { + return (DerivedFieldMapper) in; + } + + /** + * Builder for this field mapper + * + * @opensearch.internal + */ + public static class Builder extends ParametrizedFieldMapper.Builder { + // TODO: The type of parameter may change here if the actual underlying FieldType object is needed + private final Parameter<String> type = Parameter.stringParam("type", false, m -> toType(m).type, "text"); + + private final Parameter