diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 398c3d52595f1..a738eb54e17f6 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -34,3 +34,5 @@ BWC_VERSION:
- "2.14.0"
- "2.14.1"
- "2.15.0"
+ - "2.15.1"
+ - "2.16.0"
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index b5b0a815b02b2..8ceecb3abb4a2 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -11,7 +11,7 @@
# 3. Use the command palette to run the CODEOWNERS: Show owners of current file command, which will display all code owners for the current file.
# Default ownership for all repo files
-* @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah
+* @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
/modules/transport-netty4/ @peternied
@@ -24,4 +24,4 @@
/.github/ @peternied
-/MAINTAINERS.md @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @dbwiddis @dreamer-89 @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @tlfeng @VachaShah
+/MAINTAINERS.md @anasalkouz @andrross @ashking94 @Bukhtawar @CEHENKLE @dblock @dbwiddis @gbbafna @jed326 @kotwanikunal @mch2 @msfroh @nknize @owaiskazi19 @peternied @reta @Rishikesh1159 @sachinpkale @saratvemulapalli @shwetathareja @sohami @VachaShah
diff --git a/.github/ISSUE_TEMPLATE/failed_check.md b/.github/ISSUE_TEMPLATE/failed_check.md
deleted file mode 100644
index 71508c9f5bd43..0000000000000
--- a/.github/ISSUE_TEMPLATE/failed_check.md
+++ /dev/null
@@ -1,8 +0,0 @@
----
-title: '[AUTOCUT] Gradle Check Failure on push to {{ env.branch_name }}'
-labels: '>test-failure, bug, autocut'
----
-
-Gradle check has failed on push of your commit {{ env.pr_from_sha }}.
-Please examine the workflow log {{ env.workflow_url }}.
-Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index 07185ef4c65e3..89d894403ff1a 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -14,7 +14,7 @@ permissions:
jobs:
check-files:
runs-on: ubuntu-latest
- outputs:
+ outputs:
RUN_GRADLE_CHECK: ${{ steps.changed-files-specific.outputs.any_changed }}
steps:
- uses: actions/checkout@v4
@@ -26,7 +26,7 @@ jobs:
release-notes/*.md
.github/**
*.md
-
+
gradle-check:
needs: check-files
if: github.repository == 'opensearch-project/OpenSearch' && needs.check-files.outputs.RUN_GRADLE_CHECK == 'true'
@@ -113,6 +113,7 @@ jobs:
if: success()
uses: codecov/codecov-action@v4
with:
+ token: ${{ secrets.CODECOV_TOKEN }}
files: ./codeCoverage.xml
- name: Create Comment Success
@@ -158,15 +159,6 @@ jobs:
Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
- - name: Create Issue On Push Failure
- if: ${{ github.event_name == 'push' && failure() }}
- uses: dblock/create-a-github-issue@v3
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- with:
- assignees: ${{ github.event.head_commit.author.username }}, ${{ github.triggering_actor }}
- filename: .github/ISSUE_TEMPLATE/failed_check.md
-
check-result:
needs: [check-files, gradle-check]
if: always()
@@ -174,4 +166,4 @@ jobs:
steps:
- name: Fail if gradle-check fails
if: ${{ needs.check-files.outputs.RUN_GRADLE_CHECK && needs.gradle-check.result == 'failure' }}
- run: exit 1
\ No newline at end of file
+ run: exit 1
diff --git a/.whitesource b/.whitesource
new file mode 100644
index 0000000000000..bb071b4a2b1ce
--- /dev/null
+++ b/.whitesource
@@ -0,0 +1,45 @@
+{
+ "scanSettings": {
+ "configMode": "AUTO",
+ "configExternalURL": "",
+ "projectToken": "",
+ "baseBranches": []
+ },
+ "scanSettingsSAST": {
+ "enableScan": false,
+ "scanPullRequests": false,
+ "incrementalScan": true,
+ "baseBranches": [],
+ "snippetSize": 10
+ },
+ "checkRunSettings": {
+ "vulnerableCheckRunConclusionLevel": "failure",
+ "displayMode": "diff",
+ "useMendCheckNames": true
+ },
+ "checkRunSettingsSAST": {
+ "checkRunConclusionLevel": "failure",
+ "severityThreshold": "high"
+ },
+ "issueSettings": {
+ "minSeverityLevel": "LOW",
+ "issueType": "DEPENDENCY"
+ },
+ "issueSettingsSAST": {
+ "minSeverityLevel": "high",
+ "issueType": "repo"
+ },
+ "remediateSettings": {
+ "workflowRules": {
+ "enabled": true
+ }
+ },
+ "imageSettings":{
+ "imageTracing":{
+ "enableImageTracingPR": false,
+ "addRepositoryCoordinate": false,
+ "addDockerfilePath": false,
+ "addMendIdentifier": false
+ }
+ }
+}
\ No newline at end of file
diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index 964383078c38d..06b761b1df8bd 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Dependencies
### Changed
+- Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345))
- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459))
- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c6b9822d9a8f3..4d0990db31d20 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,74 +5,61 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
## [Unreleased 2.x]
### Added
-- Add leader and follower check failure counter metrics ([#12439](https://github.com/opensearch-project/OpenSearch/pull/12439))
-- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333))
-- Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423))
-- Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478))
-- Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293))
-- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679))
-- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637))
-- Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819))
-- [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13131](https://github.com/opensearch-project/OpenSearch/pull/13131))
-- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776))
-- Add capability to disable source recovery_source for an index ([#13590](https://github.com/opensearch-project/OpenSearch/pull/13590))
-- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304))
-- Add upload flow for writing routing table to remote store ([#13870](https://github.com/opensearch-project/OpenSearch/pull/13870))
-- Add dynamic action retry timeout setting ([#14022](https://github.com/opensearch-project/OpenSearch/issues/14022))
-- [Remote Store] Add support to disable flush based on translog reader count ([#14027](https://github.com/opensearch-project/OpenSearch/pull/14027))
-- Add recovery chunk size setting ([#13997](https://github.com/opensearch-project/OpenSearch/pull/13997))
-- [Query Insights] Add exporter support for top n queries ([#12982](https://github.com/opensearch-project/OpenSearch/pull/12982))
-- [Query Insights] Add X-Opaque-Id to search request metadata for top n queries ([#13374](https://github.com/opensearch-project/OpenSearch/pull/13374))
-- [Streaming Indexing] Enhance RestAction with request / response streaming support ([#13772](https://github.com/opensearch-project/OpenSearch/pull/13772))
-- Add support for query level resource usage tracking ([#13172](https://github.com/opensearch-project/OpenSearch/pull/13172))
-- Move Remote Store Migration from DocRep to GA and modify remote migration settings name ([#14100](https://github.com/opensearch-project/OpenSearch/pull/14100))
-- Derived field object type support ([#13720](https://github.com/opensearch-project/OpenSearch/pull/13720))
-- [Query Insights] Add cpu and memory metrics to top n queries ([#13739](https://github.com/opensearch-project/OpenSearch/pull/13739))
+- Add fingerprint ingest processor ([#13724](https://github.com/opensearch-project/OpenSearch/pull/13724))
+- [Remote Store] Rate limiter for remote store low priority uploads ([#14374](https://github.com/opensearch-project/OpenSearch/pull/14374/))
+- Apply the date histogram rewrite optimization to range aggregation ([#13865](https://github.com/opensearch-project/OpenSearch/pull/13865))
+- [Writable Warm] Add composite directory implementation and integrate it with FileCache ([#12782](https://github.com/opensearch-project/OpenSearch/pull/12782))
+- [Workload Management] Add QueryGroup schema ([#13669](https://github.com/opensearch-project/OpenSearch/pull/13669))
+- Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554))
+- Fix race condition while parsing derived fields from search definition ([#14445](https://github.com/opensearch-project/OpenSearch/pull/14445))
+- Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439))
### Dependencies
-- Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559))
- Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442))
-- Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557))
-- Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.2 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556), [#13986](https://github.com/opensearch-project/OpenSearch/pull/13986))
-- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753))
-- Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to 3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642))
-- Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665))
-- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752))
-- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756))
-- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802))
-- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817))
-- Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
-- Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
-- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840))
-- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839))
-- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935))
-- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933))
- Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042))
-- Bump `com.azure:azure-core-http-netty` from 1.12.8 to 1.15.1 ([#14128](https://github.com/opensearch-project/OpenSearch/pull/14128))
-- Bump `tim-actions/get-pr-commits` from 1.1.0 to 1.3.1 ([#14126](https://github.com/opensearch-project/OpenSearch/pull/14126))
+- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356))
+- Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361))
+- Bump `reactor` from 3.5.17 to 3.5.18 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
+- Bump `reactor-netty` from 1.1.19 to 1.1.20 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395))
+- Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396))
+- Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398))
+- Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399))
+- Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397))
+- Bump `opentelemetry` from 1.36.0 to 1.39.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457))
+- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506))
+- Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517))
+- Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610))
+- Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672))
+- Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673))
### Changed
-- Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650))
-- Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481))
-- Adds support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636))
-- Adds support to provide tags with value in Gauge metric. ([#13994](https://github.com/opensearch-project/OpenSearch/pull/13994))
-- Move cache removal notifications outside lru lock ([#14017](https://github.com/opensearch-project/OpenSearch/pull/14017))
+- [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187))
+- unsignedLongRangeQuery now returns MatchNoDocsQuery if the lower bounds are greater than the upper bounds ([#14416](https://github.com/opensearch-project/OpenSearch/pull/14416))
+- Updated the `indices.query.bool.max_clause_count` setting from being static to dynamically updateable ([#13568](https://github.com/opensearch-project/OpenSearch/pull/13568))
+- Make the class CommunityIdProcessor final ([#14448](https://github.com/opensearch-project/OpenSearch/pull/14448))
+- Allow @InternalApi annotation on classes not meant to be constructed outside of the OpenSearch core ([#14575](https://github.com/opensearch-project/OpenSearch/pull/14575))
+- Add @InternalApi annotation to japicmp exclusions ([#14597](https://github.com/opensearch-project/OpenSearch/pull/14597))
+- Allow system index warning in OpenSearchRestTestCase.refreshAllIndices ([#14635](https://github.com/opensearch-project/OpenSearch/pull/14635))
### Deprecated
### Removed
-- Remove handling of index.mapper.dynamic in AutoCreateIndex([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067))
### Fixed
-- Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624))
-- Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646))
-- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710))
-- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829))
-- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885))
-- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903))
-- Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911))
-- Fix double invocation of postCollection when MultiBucketCollector is present ([#14015](https://github.com/opensearch-project/OpenSearch/pull/14015))
+- Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474))
+- Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379))
+- Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086))
+- Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155))
+- Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465))
+- Write shard level metadata blob when snapshotting searchable snapshot indexes ([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190))
+- Fix aggs result of NestedAggregator with sub NestedAggregator ([#13324](https://github.com/opensearch-project/OpenSearch/pull/13324))
+- Fix fs info reporting negative available size ([#11573](https://github.com/opensearch-project/OpenSearch/pull/11573))
+- Add ListPitInfo::getKeepAlive() getter ([#14495](https://github.com/opensearch-project/OpenSearch/pull/14495))
+- Fix FuzzyQuery on keyword fields to use IndexOrDocValuesQuery when both index and doc_values are enabled ([#14378](https://github.com/opensearch-project/OpenSearch/pull/14378))
+- Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004))
+- Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552))
+- Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200))
### Security
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.13...2.x
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.15...2.x
diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index bc11e7335af49..03cd189aa911e 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -651,16 +651,18 @@ Note that these snapshots do not follow the Maven [naming convention](https://ma
### Flaky Tests
-OpenSearch has a very large test suite with long running, often failing (flaky), integration tests. Such individual tests are labelled as [Flaky Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22). Your help is wanted fixing these!
+If you encounter a test failure locally or in CI that is seemingly unrelated to the change in your pull request, it may be a known flaky test or a new test failure. OpenSearch has a very large test suite with long running, often failing (flaky), integration tests. Such individual tests are labelled as [Flaky Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22). Your help is wanted fixing these!
-If you encounter a build/test failure in CI that is unrelated to the change in your pull request, it may be a known flaky test, or a new test failure.
+The automation [gradle-check-flaky-test-detector](https://build.ci.opensearch.org/job/gradle-check-flaky-test-detector/), which runs in the OpenSearch public Jenkins, identifies flaky test failures that occur as part of post-merge actions. Once a flaky test is identified, the automation creates an issue with a detailed report that includes links to all relevant commits, the Gradle check build log, the test report, and the pull requests that are impacted by the flaky test failures. This automation leverages data from the [OpenSearch Metrics Project](https://github.com/opensearch-project/opensearch-metrics) to establish a baseline for creating the issue and updating the flaky test report. For all flaky test issues created by the automation, visit this [link](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A%3Etest-failure+author%3Aopensearch-ci-bot).
+
+If you still see a failing test that is not part of the post-merge actions, please do the following:
+
+* Follow failed CI links, and locate the failing test(s) or use the [Gradle Check Metrics Dashboard](#gradle-check-metrics-dashboard).
+* Copy-paste the failure into a comment of your PR.
+* Search through issues using the name of the failed test for whether this is a known flaky test.
+* If no existing issue is found, open one.
+* Retry CI via the GitHub UX or by pushing an update to your PR.
-1. Follow failed CI links, and locate the failing test(s).
-2. Copy-paste the failure into a comment of your PR.
-3. Search through [issues](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22) using the name of the failed test for whether this is a known flaky test.
-4. If an existing issue is found, paste a link to the known issue in a comment to your PR.
-5. If no existing issue is found, open one.
-6. Retry CI via the GitHub UX or by pushing an update to your PR.
### Gradle Check Metrics Dashboard
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 6855281a488ca..3298ceb15463c 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -9,6 +9,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
| Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon |
| Andrew Ross | [andrross](https://github.com/andrross) | Amazon |
| Andriy Redko | [reta](https://github.com/reta) | Aiven |
+| Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon |
| Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon |
| Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon |
| Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon |
@@ -26,18 +27,18 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
| Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon |
| Shweta Thareja | [shwetathareja](https://github.com/shwetathareja) | Amazon |
| Sorabh Hamirwasia | [sohami](https://github.com/sohami) | Amazon |
-| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon |
-| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon |
| Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon |
## Emeritus
-| Maintainer | GitHub ID | Affiliation |
-| --------------------- | ----------------------------------------- | ----------- |
-| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon |
-| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon |
-| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon |
-| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta |
-| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon |
-| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon |
-| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent |
+| Maintainer | GitHub ID | Affiliation |
+| ---------------------- |-------------------------------------------- | ----------- |
+| Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon |
+| Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon |
+| Kartik Ganesh | [kartg](https://github.com/kartg) | Amazon |
+| Abbas Hussain | [abbashus](https://github.com/abbashus) | Meta |
+| Himanshu Setia | [setiah](https://github.com/setiah) | Amazon |
+| Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon |
+| Rabi Panda | [adnapibar](https://github.com/adnapibar) | Independent |
+| Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon |
+| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon |
diff --git a/TESTING.md b/TESTING.md
index 80fc2412d736b..de7ab3eefe2f8 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -17,6 +17,8 @@ OpenSearch uses [jUnit](https://junit.org/junit5/) for testing, it also uses ran
- [Miscellaneous](#miscellaneous)
- [Running verification tasks](#running-verification-tasks)
- [Testing the REST layer](#testing-the-rest-layer)
+ - [Running REST Tests Against An External Cluster](#running-rest-tests-against-an-external-cluster)
+ - [Debugging REST Tests](#debugging-rest-tests)
- [Testing packaging](#testing-packaging)
- [Testing packaging on Windows](#testing-packaging-on-windows)
- [Testing VMs are disposable](#testing-vms-are-disposable)
@@ -33,6 +35,9 @@ OpenSearch uses [jUnit](https://junit.org/junit5/) for testing, it also uses ran
- [Bad practices](#bad-practices)
- [Use randomized-testing for coverage](#use-randomized-testing-for-coverage)
- [Abuse randomization in multi-threaded tests](#abuse-randomization-in-multi-threaded-tests)
+ - [Use `Thread.sleep`](#use-threadsleep)
+ - [Expect a specific segment topology](#expect-a-specific-segment-topology)
+ - [Leave environment in an unstable state after test](#leave-environment-in-an-unstable-state-after-test)
- [Test coverage analysis](#test-coverage-analysis)
- [Building with extra plugins](#building-with-extra-plugins)
- [Environment misc](#environment-misc)
@@ -88,21 +93,23 @@ This will instruct all JVMs (including any that run cli tools such as creating t
## Test case filtering
-- `tests.class` is a class-filtering shell-like glob pattern
-- `tests.method` is a method-filtering glob pattern.
+To run a single test, you need to specify the module that the test belongs to.
+
+Example: `./gradlew server:test --tests "*.ReplicaShardBatchAllocatorTests.testNoAsyncFetchData"`
Run a single test case (variants)
- ./gradlew test -Dtests.class=org.opensearch.package.ClassName
- ./gradlew test "-Dtests.class=*.ClassName"
+ ./gradlew module:test --tests org.opensearch.package.ClassName
+ ./gradlew module:test --tests org.opensearch.package.ClassName.testName
+ ./gradlew module:test --tests "*.ClassName"
Run all tests in a package and its sub-packages
- ./gradlew test "-Dtests.class=org.opensearch.package.*"
+ ./gradlew module:test --tests "org.opensearch.package.*"
Run any test methods that contain *esi* (e.g.: .r*esi*ze.)
- ./gradlew test "-Dtests.method=*esi*"
+ ./gradlew module:test --tests "*esi*"
Run all tests that are waiting for a bugfix (disabled by default)
@@ -267,7 +274,18 @@ yamlRestTest’s and javaRestTest’s are easy to identify, since they are found
If in doubt about which command to use, simply run <gradle path>:check
-Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the `tests.cluster` property, which if present needs to contain a comma separated list of nodes to connect to (e.g. localhost:9300).
+## Running REST Tests Against An External Cluster
+
+Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the following properties: `tests.cluster`, `tests.rest.cluster`, and `tests.clustername`. Use a comma-separated list of nodes to target a multi-node cluster.
+
+For example:
+
+ ./gradlew :rest-api-spec:yamlRestTest \
+ -Dtests.cluster=localhost:9200 -Dtests.rest.cluster=localhost:9200 -Dtests.clustername=opensearch
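+
+When targeting a multi-node cluster, list the nodes in each property, for example (the second node address below is illustrative):
+
+    ./gradlew :rest-api-spec:yamlRestTest \
+    -Dtests.cluster=localhost:9200,localhost:9201 -Dtests.rest.cluster=localhost:9200,localhost:9201 -Dtests.clustername=opensearch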
+
+## Debugging REST Tests
+
+You can launch a local OpenSearch cluster in debug mode by following [Launching and debugging from an IDE](#launching-and-debugging-from-an-ide), and then run your REST tests against it as described in [Running REST Tests Against An External Cluster](#running-rest-tests-against-an-external-cluster).
# Testing packaging
@@ -455,7 +473,7 @@ Unit tests are the preferred way to test some functionality: most of the time th
The reason why `OpenSearchSingleNodeTestCase` exists is that all our components used to be very hard to set up in isolation, which had led us to having a number of integration tests but close to no unit tests. `OpenSearchSingleNodeTestCase` is a workaround for this issue which provides an easy way to spin up a node and get access to components that are hard to instantiate like `IndicesService`. Whenever practical, you should prefer unit tests.
-Finally, if the the functionality under test needs to be run in a cluster, there are two test classes to consider:
+Finally, if the functionality under test needs to be run in a cluster, there are two test classes to consider:
* `OpenSearchRestTestCase` will connect to an external cluster. This is a good option if the tests cases don't rely on a specific configuration of the test cluster. A test cluster is set up as part of the Gradle task running integration tests, and test cases using this class can connect to it. The configuration of the cluster is provided in the Gradle files.
* `OpenSearchIntegTestCase` will create a local cluster as part of each test case. The configuration of the cluster is controlled by the test class. This is a good option if different tests cases depend on different cluster configurations, as it would be impractical (and limit parallelization) to keep re-configuring (and re-starting) the external cluster for each test case. A good example of when this class might come in handy is for testing security features, where different cluster configurations are needed to fully test each one.
@@ -477,6 +495,27 @@ However, it should not be used for coverage. For instance if you are testing a p
Multi-threaded tests are often not reproducible due to the fact that there is no guarantee on the order in which operations occur across threads. Adding randomization to the mix usually makes things worse and should be done with care.
+### Use `Thread.sleep`
+
+`Thread.sleep()` is almost always a bad idea because it is very difficult to know that you've waited long enough. Using primitives like `waitUntil` or `assertBusy`, which use `Thread.sleep()` internally, is acceptable when waiting for a specific condition. However, it is almost always better to instrument your code with concurrency primitives like a `CountDownLatch`, which lets you deterministically wait for a specific condition without the extra waiting that comes with the polling approach used by `assertBusy`.
+
+Example:
+- [ReplicaShardAllocatorIT](https://github.com/opensearch-project/OpenSearch/blob/7ffcd6500e0bd5956cef5c289ee66d9f99d533fc/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java#L208-L235): This test uses two latches: one to wait for a recovery to start and one to block that recovery so that it can deterministically test things that happen during a recovery.
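+
+As an illustration only (not code from the linked test), here is a minimal, standalone sketch of the latch-based approach; the class name and the background task are hypothetical stand-ins for the asynchronous work a real test would wait on:
+
+    import java.util.concurrent.CountDownLatch;
+    import java.util.concurrent.TimeUnit;
+
+    public class LatchWaitSketch {
+        public static void main(String[] args) throws InterruptedException {
+            CountDownLatch workStarted = new CountDownLatch(1);
+
+            Thread background = new Thread(() -> {
+                // ... the asynchronous work the test cares about ...
+                workStarted.countDown(); // signal the exact condition instead of sleeping
+            });
+            background.start();
+
+            // Deterministic wait (with a timeout as a safety net) rather than Thread.sleep(...)
+            if (workStarted.await(30, TimeUnit.SECONDS) == false) {
+                throw new AssertionError("work did not start in time");
+            }
+            background.join();
+        }
+    }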
+
+### Expect a specific segment topology
+
+By design, OpenSearch integration tests will vary how the merge policy works because in almost all scenarios you should not depend on a specific segment topology (in the real world your code will see a huge diversity of indexing workloads with OpenSearch merging things in the background all the time!). If you do in fact need to care about the segment topology (e.g. for testing statistics that might vary slightly depending on the number of segments), then you must take care to ensure that the segment topology is deterministic by doing things like disabling background refreshes, force merging after indexing data, etc.
+
+Example:
+- [SegmentReplicationResizeRequestIT](https://github.com/opensearch-project/OpenSearch/blob/f715ee1a485e550802accc1c2e3d8101208d4f0b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationResizeRequestIT.java#L102-L109): This test disables refreshes to prevent interfering with the segment replication behavior under test.
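+
+A rough sketch, assuming the standard `OpenSearchIntegTestCase` client helpers, of how a test might pin down the segment topology; the index name, document, and test class are hypothetical:
+
+    import org.opensearch.common.settings.Settings;
+    import org.opensearch.test.OpenSearchIntegTestCase;
+
+    public class DeterministicSegmentsSketchIT extends OpenSearchIntegTestCase {
+        public void testStatsWithSingleSegment() {
+            // Disable background refreshes so segments only change when the test asks for a refresh.
+            client().admin()
+                .indices()
+                .prepareCreate("test")
+                .setSettings(Settings.builder().put("index.refresh_interval", "-1"))
+                .get();
+
+            client().prepareIndex("test").setId("1").setSource("field", "value").get();
+            client().admin().indices().prepareRefresh("test").get();
+
+            // Force merge down to one segment so per-segment statistics are deterministic.
+            client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get();
+        }
+    }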
+
+### Leave environment in an unstable state after test
+
+The default test case will ensure that no open file handles or running threads are left after tear down. You must ensure that all resources are cleaned up at the end of each test case, or else the cleanup may end up racing with the tear down logic in the base test class in a way that is very difficult to reproduce.
+
+Example:
+- [AwarenessAttributeDecommissionIT](https://github.com/opensearch-project/OpenSearch/blob/main/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java#L951): Recommissions any decommissioned nodes at the end of the test to ensure the after-test checks succeed.
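+
+A minimal, plain-Java sketch of the idea (the executor is a hypothetical stand-in for whatever resource a test starts): tear-down releases the resource and waits for its threads to finish so nothing outlives the test:
+
+    import java.util.concurrent.ExecutorService;
+    import java.util.concurrent.Executors;
+    import java.util.concurrent.TimeUnit;
+
+    public class CleanupSketch {
+        private ExecutorService executor;
+
+        public void setUp() {
+            executor = Executors.newSingleThreadExecutor();
+        }
+
+        // Invoke from the test's tear-down so no running threads are left behind.
+        public void tearDown() throws InterruptedException {
+            executor.shutdownNow();
+            if (executor.awaitTermination(10, TimeUnit.SECONDS) == false) {
+                throw new AssertionError("executor did not terminate");
+            }
+        }
+    }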
+
# Test coverage analysis
The code coverage report can be generated through Gradle with [JaCoCo plugin](https://docs.gradle.org/current/userguide/jacoco_plugin.html).
diff --git a/TRIAGING.md b/TRIAGING.md
index bb04d49a66c54..c7c07a8ce30bd 100644
--- a/TRIAGING.md
+++ b/TRIAGING.md
@@ -1,6 +1,6 @@
-The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation.
+The maintainers of the OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open-to-all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. There are several weekly triage meetings scoped to the following component areas: Search, Storage, Cluster Manager, and finally "Core" as a catch-all for all other issues. To learn more about contributing to the OpenSearch Repo visit the [Contributing](./CONTRIBUTING.md) documentation.
### Do I need to attend for my issue to be addressed/triaged?
@@ -12,25 +12,30 @@ You can track if your issue was triaged by watching your GitHub notifications fo
Each meeting we seek to address all new issues. However, should we run out of time before your issue is discussed, you are always welcome to attend the next meeting or to follow up on the issue post itself.
-### How do I join the Triage meeting?
+### How do I join a Triage meeting?
-Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Zoom](https://zoom.us/download), with this [meeting link](https://us02web.zoom.us/j/86287450465) and passcode `805212`.
+ Check the [OpenSearch Meetup Group](https://www.meetup.com/opensearch/) for the latest schedule and details for joining each meeting. Each component area has its own meetup series: [Search](https://www.meetup.com/opensearch/events/300929493/), [Storage](https://www.meetup.com/opensearch/events/299907409/), [Cluster Manager](https://www.meetup.com/opensearch/events/301082218/), [Indexing](https://www.meetup.com/opensearch/events/301734024/), and [Core](https://www.meetup.com/opensearch/events/301061009/).
-After joining the Zoom meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.
+After joining the virtual meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat.
If you have an issue you'd like to bring forth please prepare a link to the issue so it can be presented and viewed by everyone in the meeting.
### Is there an agenda for each week?
-Meetings are 55 minutes and follows this structure:
+Meeting structure may vary slightly, but the general structure is as follows:
-Yes, each 55-minute meeting follows this structure:
1. **Initial Gathering:** Feel free to turn on your video and engage in informal conversation. Shortly, a volunteer triage [facilitator](#what-is-the-role-of-the-facilitator) will begin the meeting and share their screen.
2. **Record Attendees:** The facilitator will request attendees to share their GitHub profile links. These links will be collected and assembled into a [tag](#how-do-triage-facilitator-tag-comments-during-the-triage-meeting) to annotate comments during the meeting.
3. **Announcements:** Any announcements will be made at the beginning of the meeting.
-4. **Review of New Issues:** We start by reviewing all untriaged [issues](https://github.com/search?q=label%3Auntriaged+is%3Aopen++repo%3Aopensearch-project%2FOpenSearch+&type=issues&ref=advsearch&s=created&o=desc) for the OpenSearch repo.
+4. **Review of New Issues:** We start by reviewing all untriaged issues. Each meeting has a label-based search to find relevant issues:
+ - [Search](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22)
+ - [Indexing](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22%2C)
+ - [Storage](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3AStorage%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22)
+ - [Cluster Manager](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+label%3A%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22)
+ - [Core](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+-label%3A%22Search%22%2C%22Search%3ARemote+Search%22%2C%22Search%3AResiliency%22%2C%22Search%3APerformance%22%2C%22Search%3ARelevance%22%2C%22Search%3AAggregations%22%2C%22Search%3AQuery+Capabilities%22%2C%22Search%3AQuery+Insights%22%2C%22Search%3ASearchable+Snapshots%22%2C%22Search%3AUser+Behavior+Insights%22%2C%22Storage%22%2C%22Storage%3AResiliency%22%2C%22Storage%3APerformance%22%2C%22Storage%3ASnapshots%22%2C%22Storage%3ARemote%22%2C%22Storage%3ADurability%22%2C%22Cluster+Manager%22%2C%22ClusterManager%3ARemoteState%22%2C%22Indexing%3AReplication%22%2C%22Indexing%22%2C%22Indexing%3APerformance%22%2C%22Indexing+%26+Search%22)
5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request.
6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests.
+7. **Review of Old Untriaged Issues:** Time permitting, each meeting will look at all [untriaged issues older than 14 days](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged+created%3A%3C2024-05-20) to prevent issues from falling through the cracks (note the GitHub API does not allow for relative times, so the date in this search must be updated every meeting).
### What is the role of the facilitator?
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index c68cc0406d3a6..b984ef3800490 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -128,7 +128,7 @@ dependencies {
testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}"
testFixturesApi gradleApi()
testFixturesApi gradleTestKit()
- testImplementation 'org.wiremock:wiremock-standalone:3.3.1'
+ testImplementation 'org.wiremock:wiremock-standalone:3.6.0'
testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}"
integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') {
exclude module: "groovy"
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
index 2ea8c2d015ecc..d0cb2da9c1dd3 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java
@@ -110,7 +110,7 @@ public void execute(Task t) {
if (BuildParams.getRuntimeJavaVersion() == JavaVersion.VERSION_1_8) {
test.systemProperty("java.locale.providers", "SPI,JRE");
} else {
- test.systemProperty("java.locale.providers", "SPI,COMPAT");
+ test.systemProperty("java.locale.providers", "SPI,CLDR");
if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) {
test.jvmArgs("--illegal-access=warn");
}
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
index 448ba8a96ef02..570ab4a9f70e1 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/info/GlobalBuildInfoPlugin.java
@@ -199,7 +199,28 @@ private JavaVersion determineJavaVersion(String description, File javaHome, Java
}
private JvmInstallationMetadata getJavaInstallation(File javaHome) {
- final InstallationLocation location = new InstallationLocation(javaHome, "Java home");
+ InstallationLocation location = null;
+
+ try {
+ try {
+ // The InstallationLocation(File, String) is used by Gradle pre-8.8
+ location = (InstallationLocation) MethodHandles.publicLookup()
+ .findConstructor(InstallationLocation.class, MethodType.methodType(void.class, File.class, String.class))
+ .invokeExact(javaHome, "Java home");
+ } catch (Throwable ex) {
+ // The InstallationLocation::userDefined is used by Gradle post-8.7
+ location = (InstallationLocation) MethodHandles.publicLookup()
+ .findStatic(
+ InstallationLocation.class,
+ "userDefined",
+ MethodType.methodType(InstallationLocation.class, File.class, String.class)
+ )
+ .invokeExact(javaHome, "Java home");
+
+ }
+ } catch (Throwable ex) {
+ throw new IllegalStateException("Unable to find suitable InstallationLocation constructor / factory method", ex);
+ }
try {
try {
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
index 7ab91448252f2..a7f720855951a 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/internal/InternalDistributionArchiveSetupPlugin.java
@@ -148,8 +148,8 @@ private void configureGeneralTaskDefaults(Project project) {
project.getTasks().withType(AbstractCopyTask.class).configureEach(t -> {
t.dependsOn(project.getTasks().withType(EmptyDirTask.class));
t.setIncludeEmptyDirs(true);
- t.setDirMode(0755);
- t.setFileMode(0644);
+ t.dirPermissions(perms -> perms.unix(0755));
+ t.filePermissions(perms -> perms.unix(0644));
});
// common config across all archives
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
index d4dcde9d63087..28a344de31ddb 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesPrecommitPlugin.java
@@ -33,11 +33,14 @@
package org.opensearch.gradle.precommit;
import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin;
+import org.opensearch.gradle.util.GradleUtils;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.ProjectDependency;
+import org.gradle.api.file.FileCollection;
import org.gradle.api.plugins.JavaPlugin;
+import org.gradle.api.provider.Provider;
import org.gradle.api.tasks.TaskProvider;
public class DependencyLicensesPrecommitPlugin extends PrecommitPlugin {
@@ -48,15 +51,16 @@ public TaskProvider<? extends Task> createTask(Project project) {
TaskProvider<DependencyLicensesTask> dependencyLicenses = project.getTasks()
.register("dependencyLicenses", DependencyLicensesTask.class);
+ final Configuration runtimeClasspath = project.getConfigurations().getByName(JavaPlugin.RUNTIME_CLASSPATH_CONFIGURATION_NAME);
+ final Configuration compileOnly = project.getConfigurations()
+ .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME);
+        final Provider<FileCollection> provider = project.provider(
+ () -> GradleUtils.getFiles(project, runtimeClasspath, dependency -> dependency instanceof ProjectDependency == false)
+ .minus(compileOnly)
+ );
+
// only require dependency licenses for non-opensearch deps
- dependencyLicenses.configure(t -> {
- Configuration runtimeClasspath = project.getConfigurations().getByName(JavaPlugin.RUNTIME_CLASSPATH_CONFIGURATION_NAME);
- Configuration compileOnly = project.getConfigurations()
- .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME);
- t.setDependencies(
- runtimeClasspath.fileCollection(dependency -> dependency instanceof ProjectDependency == false).minus(compileOnly)
- );
- });
+ dependencyLicenses.configure(t -> t.getDependencies().set(provider));
// we also create the updateShas helper task that is associated with dependencyLicenses
project.getTasks().register("updateShas", UpdateShasTask.class, t -> t.setParentTask(dependencyLicenses));
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
index e801681c5c386..7248e0bc14431 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java
@@ -39,6 +39,7 @@
import org.gradle.api.file.FileCollection;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
+import org.gradle.api.provider.Property;
import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputDirectory;
import org.gradle.api.tasks.InputFiles;
@@ -121,7 +122,7 @@ public class DependencyLicensesTask extends DefaultTask {
/**
* A collection of jar files that should be checked.
*/
- private FileCollection dependencies;
+    private Property<FileCollection> dependenciesProvider;
/**
* The directory to find the license and sha files in.
@@ -158,12 +159,11 @@ public void mapping(Map<String, String> props) {
}
@InputFiles
- public FileCollection getDependencies() {
- return dependencies;
- }
-
- public void setDependencies(FileCollection dependencies) {
- this.dependencies = dependencies;
+    public Property<FileCollection> getDependencies() {
+ if (dependenciesProvider == null) {
+ dependenciesProvider = getProject().getObjects().property(FileCollection.class);
+ }
+ return dependenciesProvider;
}
@Optional
@@ -190,6 +190,11 @@ public void ignoreSha(String dep) {
@TaskAction
public void checkDependencies() throws IOException, NoSuchAlgorithmException {
+ if (dependenciesProvider == null) {
+ throw new GradleException("No dependencies variable defined.");
+ }
+
+ final FileCollection dependencies = dependenciesProvider.get();
if (dependencies == null) {
throw new GradleException("No dependencies variable defined.");
}
@@ -226,7 +231,7 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException {
}
}
- checkDependencies(licenses, notices, sources, shaFiles);
+ checkDependencies(dependencies, licenses, notices, sources, shaFiles);
licenses.forEach((item, exists) -> failIfAnyMissing(item, exists, "license"));
@@ -255,6 +260,7 @@ private void failIfAnyMissing(String item, Boolean exists, String type) {
}
private void checkDependencies(
+ FileCollection dependencies,
Map<String, Boolean> licenses,
Map<String, Boolean> notices,
Map<String, Boolean> sources,
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
index efcd01f163089..f7bb708933803 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java
@@ -37,6 +37,7 @@
import org.opensearch.gradle.LoggedExec;
import org.opensearch.gradle.OS;
import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin;
+import org.opensearch.gradle.util.GradleUtils;
import org.gradle.api.DefaultTask;
import org.gradle.api.JavaVersion;
import org.gradle.api.artifacts.Configuration;
@@ -203,11 +204,13 @@ public Set<File> getJarsToScan() {
// or dependencies added as `files(...)`, we can't be sure if those are third party or not.
// err on the side of scanning these to make sure we don't miss anything
Spec<Dependency> reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.opensearch") == false;
-        Set<File> jars = getRuntimeConfiguration().getResolvedConfiguration().getFiles(reallyThirdParty);
-        Set<File> compileOnlyConfiguration = getProject().getConfigurations()
- .getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
- .getResolvedConfiguration()
- .getFiles(reallyThirdParty);
+
+        Set<File> jars = GradleUtils.getFiles(getProject(), getRuntimeConfiguration(), reallyThirdParty).getFiles();
+        Set<File> compileOnlyConfiguration = GradleUtils.getFiles(
+ getProject(),
+ getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME),
+ reallyThirdParty
+ ).getFiles();
// don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin
if (compileOnlyConfiguration != null) {
jars.removeAll(compileOnlyConfiguration);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
index 3fe08888afb09..de479f3b560b6 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/UpdateShasTask.java
@@ -66,7 +66,7 @@ public UpdateShasTask() {
public void updateShas() throws NoSuchAlgorithmException, IOException {
Set shaFiles = parentTask.get().getShaFiles();
- for (File dependency : parentTask.get().getDependencies()) {
+ for (File dependency : parentTask.get().getDependencies().get()) {
String jarName = dependency.getName();
File shaFile = parentTask.get().getShaFile(jarName);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
index e82d8ed73ced2..3352dda98ef66 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/tar/SymbolicLinkPreservingTar.java
@@ -184,7 +184,7 @@ private void visitSymbolicLink(final FileCopyDetailsInternal details) {
visitedSymbolicLinks.add(details.getFile());
final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString(), TarConstants.LF_SYMLINK);
entry.setModTime(getModTime(details));
- entry.setMode(UnixStat.LINK_FLAG | details.getMode());
+ entry.setMode(UnixStat.LINK_FLAG | details.getPermissions().toUnixNumeric());
try {
entry.setLinkName(Files.readSymbolicLink(details.getFile().toPath()).toString());
tar.putArchiveEntry(entry);
@@ -197,7 +197,7 @@ private void visitSymbolicLink(final FileCopyDetailsInternal details) {
private void visitDirectory(final FileCopyDetailsInternal details) {
final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString() + "/");
entry.setModTime(getModTime(details));
- entry.setMode(UnixStat.DIR_FLAG | details.getMode());
+ entry.setMode(UnixStat.DIR_FLAG | details.getPermissions().toUnixNumeric());
try {
tar.putArchiveEntry(entry);
tar.closeArchiveEntry();
@@ -209,7 +209,7 @@ private void visitDirectory(final FileCopyDetailsInternal details) {
private void visitFile(final FileCopyDetailsInternal details) {
final TarArchiveEntry entry = new TarArchiveEntry(details.getRelativePath().getPathString());
entry.setModTime(getModTime(details));
- entry.setMode(UnixStat.FILE_FLAG | details.getMode());
+ entry.setMode(UnixStat.FILE_FLAG | details.getPermissions().toUnixNumeric());
entry.setSize(details.getSize());
try {
tar.putArchiveEntry(entry);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
index c9e18426966f9..e8772522b19a4 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java
@@ -34,6 +34,7 @@
import com.avast.gradle.dockercompose.ComposeExtension;
import com.avast.gradle.dockercompose.DockerComposePlugin;
import com.avast.gradle.dockercompose.ServiceInfo;
+import com.avast.gradle.dockercompose.tasks.ComposeBuild;
import com.avast.gradle.dockercompose.tasks.ComposeDown;
import com.avast.gradle.dockercompose.tasks.ComposePull;
import com.avast.gradle.dockercompose.tasks.ComposeUp;
@@ -200,6 +201,7 @@ public void execute(Task task) {
maybeSkipTasks(tasks, dockerSupport, getTaskClass("org.opensearch.gradle.test.RestIntegTestTask"));
maybeSkipTasks(tasks, dockerSupport, TestingConventionsTasks.class);
maybeSkipTasks(tasks, dockerSupport, getTaskClass("org.opensearch.gradle.test.AntFixture"));
+ maybeSkipTasks(tasks, dockerSupport, ComposeBuild.class);
maybeSkipTasks(tasks, dockerSupport, ComposeUp.class);
maybeSkipTasks(tasks, dockerSupport, ComposePull.class);
maybeSkipTasks(tasks, dockerSupport, ComposeDown.class);
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
index 031fee2d1127f..428b4a16748e1 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java
@@ -39,12 +39,17 @@
import org.gradle.api.UnknownTaskException;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.artifacts.Dependency;
+import org.gradle.api.artifacts.LenientConfiguration;
+import org.gradle.api.file.FileCollection;
+import org.gradle.api.internal.artifacts.ivyservice.ResolvedFilesCollectingVisitor;
+import org.gradle.api.internal.artifacts.ivyservice.resolveengine.artifact.SelectedArtifactSet;
import org.gradle.api.plugins.JavaBasePlugin;
import org.gradle.api.plugins.JavaPluginExtension;
import org.gradle.api.provider.Provider;
import org.gradle.api.services.BuildService;
import org.gradle.api.services.BuildServiceRegistration;
import org.gradle.api.services.BuildServiceRegistry;
+import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.SourceSet;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskContainer;
@@ -53,6 +58,9 @@
import org.gradle.plugins.ide.eclipse.model.EclipseModel;
import org.gradle.plugins.ide.idea.model.IdeaModel;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -245,4 +253,22 @@ public static String getProjectPathFromTask(String taskPath) {
int lastDelimiterIndex = taskPath.lastIndexOf(":");
return lastDelimiterIndex == 0 ? ":" : taskPath.substring(0, lastDelimiterIndex);
}
+
+ public static FileCollection getFiles(Project project, Configuration cfg, Spec<? super Dependency> spec) {
+ final LenientConfiguration configuration = cfg.getResolvedConfiguration().getLenientConfiguration();
+ try {
+ // Using reflection here to cover the pre 8.7 releases (since those have no such APIs), the
+ // ResolverResults.LegacyResolverResults.LegacyVisitedArtifactSet::select(...) is not available
+ // on older versions.
+ final MethodHandle mh = MethodHandles.lookup()
+ .findVirtual(configuration.getClass(), "select", MethodType.methodType(SelectedArtifactSet.class, Spec.class))
+ .bindTo(configuration);
+
+ final ResolvedFilesCollectingVisitor visitor = new ResolvedFilesCollectingVisitor();
+ ((SelectedArtifactSet) mh.invoke(spec)).visitArtifacts(visitor, false);
+ return project.files(visitor.getFiles());
+ } catch (Throwable ex) {
+ return project.files(configuration.getFiles(spec));
+ }
+ }
}
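The new getFiles helper resolves a select(Spec) method reflectively on the lenient configuration so the same build logic still runs on Gradle releases that predate that API, falling back to getFiles(Spec) when the lookup or the call fails. A stripped-down, hypothetical sketch of that lookup-and-fallback pattern (illustrative names, not project code):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.util.function.Supplier;

    final class OptionalApiCall {
        // Invoke a no-arg, String-returning method that may not exist on older library versions;
        // if the lookup or the invocation fails, fall back to the supplied default instead.
        static String invokeOrFallback(Object target, String methodName, Supplier<String> fallback) {
            try {
                MethodHandle handle = MethodHandles.lookup()
                    .findVirtual(target.getClass(), methodName, MethodType.methodType(String.class))
                    .bindTo(target);
                return (String) handle.invoke();
            } catch (Throwable t) { // NoSuchMethodException, access failures, or the call itself throwing
                return fallback.get();
            }
        }
    }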
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
index bb216b27128e1..28513710470af 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/DependencyLicensesTaskTests.java
@@ -344,7 +344,7 @@ private TaskProvider createDependencyLicensesTask(Projec
.register("dependencyLicenses", DependencyLicensesTask.class, new Action() {
@Override
public void execute(DependencyLicensesTask dependencyLicensesTask) {
- dependencyLicensesTask.setDependencies(getDependencies(project));
+ dependencyLicensesTask.getDependencies().set(getDependencies(project));
final Map<String, String> mappings = new HashMap<>();
mappings.put("from", "groovy-.*");
diff --git a/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java b/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
index 2deabb752017a..15d6d6cd4c31c 100644
--- a/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
+++ b/buildSrc/src/test/java/org/opensearch/gradle/precommit/UpdateShasTaskTests.java
@@ -102,7 +102,7 @@ public void whenDependencyExistsButShaNotThenShouldCreateNewShaFile() throws IOE
public void whenDependencyAndWrongShaExistsThenShouldNotOverwriteShaFile() throws IOException, NoSuchAlgorithmException {
project.getDependencies().add("someCompileConfiguration", dependency);
- File groovyJar = task.getParentTask().getDependencies().getFiles().iterator().next();
+ File groovyJar = task.getParentTask().getDependencies().get().getFiles().iterator().next();
String groovyShaName = groovyJar.getName() + ".sha1";
File groovySha = createFileIn(getLicensesDir(project), groovyShaName, "content");
@@ -162,7 +162,7 @@ private TaskProvider createDependencyLicensesTask(Projec
.register("dependencyLicenses", DependencyLicensesTask.class, new Action() {
@Override
public void execute(DependencyLicensesTask dependencyLicensesTask) {
- dependencyLicensesTask.setDependencies(getDependencies(project));
+ dependencyLicensesTask.getDependencies().set(getDependencies(project));
}
});
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index c34409053b915..a99bd4801b7f3 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
opensearch = 3.0.0
-lucene = 9.11.0
+lucene = 9.12.0-snapshot-847316d
bundled_jdk_vendor = adoptium
bundled_jdk = 21.0.3+9
@@ -29,12 +29,12 @@ hdrhistogram = 2.2.2
# when updating the JNA version, also update the version in buildSrc/build.gradle
jna = 5.13.0
-netty = 4.1.110.Final
+netty = 4.1.111.Final
joda = 2.12.7
# project reactor
-reactor_netty = 1.1.19
-reactor = 3.5.17
+reactor_netty = 1.1.20
+reactor = 3.5.18
# client dependencies
httpclient5 = 5.2.1
@@ -74,5 +74,5 @@ jzlib = 1.1.3
resteasy = 6.2.4.Final
# opentelemetry dependencies
-opentelemetry = 1.36.0
-opentelemetrysemconv = 1.23.1-alpha
+opentelemetry = 1.39.0
+opentelemetrysemconv = 1.25.0-alpha
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
index 35d9929a649ff..3546776fa3617 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java
@@ -154,6 +154,9 @@ static Request bulk(BulkRequest bulkRequest) throws IOException {
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
parameters.withPipeline(bulkRequest.pipeline());
parameters.withRouting(bulkRequest.routing());
+ if (bulkRequest.requireAlias() != null) {
+ parameters.withRequireAlias(bulkRequest.requireAlias());
+ }
// Bulk API only supports newline delimited JSON or Smile. Before executing
// the bulk, we need to check that all requests have the same content-type
// and this content-type is supported by the Bulk API.
@@ -232,6 +235,10 @@ static Request bulk(BulkRequest bulkRequest) throws IOException {
metadata.field("_source", updateRequest.fetchSource());
}
}
+
+ if (action.isRequireAlias()) {
+ metadata.field("require_alias", action.isRequireAlias());
+ }
metadata.endObject();
}
metadata.endObject();
@@ -533,7 +540,7 @@ static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throw
Request request;
if (searchTemplateRequest.isSimulate()) {
- request = new Request(HttpGet.METHOD_NAME, "_render/template");
+ request = new Request(HttpGet.METHOD_NAME, "/_render/template");
} else {
SearchRequest searchRequest = searchTemplateRequest.getRequest();
String endpoint = endpoint(searchRequest.indices(), "_search/template");
@@ -796,8 +803,7 @@ static Request termVectors(TermVectorsRequest tvrequest) throws IOException {
}
static Request mtermVectors(MultiTermVectorsRequest mtvrequest) throws IOException {
- String endpoint = "_mtermvectors";
- Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+ Request request = new Request(HttpGet.METHOD_NAME, "/_mtermvectors");
request.setEntity(createEntity(mtvrequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
index da9f790215669..f5b1b0768ff4a 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/CrudIT.java
@@ -1299,4 +1299,61 @@ public void testMultiTermvectors() throws IOException {
}
}
}
+
+ public void testBulkWithRequireAlias() throws IOException {
+ {
+ String indexAliasName = "testindex-1";
+
+ BulkRequest bulkRequest = new BulkRequest(indexAliasName);
+ bulkRequest.requireAlias(true);
+ bulkRequest.add(new IndexRequest().id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-2";
+
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.requireAlias(true);
+ bulkRequest.add(new IndexRequest().index(indexAliasName).id("1").source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().index(indexAliasName).id("2").source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-3";
+
+ BulkRequest bulkRequest = new BulkRequest(indexAliasName);
+ bulkRequest.add(new IndexRequest().id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON));
+ bulkRequest.add(new IndexRequest().id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON));
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ {
+ String indexAliasName = "testindex-4";
+
+ BulkRequest bulkRequest = new BulkRequest();
+ bulkRequest.add(
+ new IndexRequest().index(indexAliasName).id("1").setRequireAlias(true).source("{ \"name\": \"Biden\" }", XContentType.JSON)
+ );
+ bulkRequest.add(
+ new IndexRequest().index(indexAliasName).id("2").setRequireAlias(true).source("{ \"name\": \"Trump\" }", XContentType.JSON)
+ );
+
+ BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, RequestOptions.DEFAULT);
+
+ assertFalse("Should not auto-create the '" + indexAliasName + "' index.", indexExists(indexAliasName));
+ assertTrue("Bulk response must have failures.", bulkResponse.hasFailures());
+ }
+ }
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
index c8eafb88b7495..ccdcc21f0fc8b 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
@@ -701,7 +701,7 @@ public void testOpenExistingIndex() throws IOException {
closeIndex(index);
ResponseException exception = expectThrows(
ResponseException.class,
- () -> client().performRequest(new Request(HttpGet.METHOD_NAME, index + "/_search"))
+ () -> client().performRequest(new Request(HttpGet.METHOD_NAME, "/" + index + "/_search"))
);
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
assertThat(exception.getMessage().contains(index), equalTo(true));
@@ -714,7 +714,7 @@ public void testOpenExistingIndex() throws IOException {
);
assertTrue(openIndexResponse.isAcknowledged());
- Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, index + "/_search"));
+ Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, "/" + index + "/_search"));
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
}
@@ -771,7 +771,7 @@ public void testCloseExistingIndex() throws IOException {
ResponseException exception = expectThrows(
ResponseException.class,
- () -> client().performRequest(new Request(HttpGet.METHOD_NAME, indexResult.getIndex() + "/_search"))
+ () -> client().performRequest(new Request(HttpGet.METHOD_NAME, "/" + indexResult.getIndex() + "/_search"))
);
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
assertThat(exception.getMessage().contains(indexResult.getIndex()), equalTo(true));
@@ -1270,7 +1270,7 @@ public void testGetAliasesNonExistentIndexOrAlias() throws IOException {
assertThat(getAliasesResponse.getException(), nullValue());
}
createIndex(index, Settings.EMPTY);
- client().performRequest(new Request(HttpPut.METHOD_NAME, index + "/_alias/" + alias));
+ client().performRequest(new Request(HttpPut.METHOD_NAME, "/" + index + "/_alias/" + alias));
{
GetAliasesRequest getAliasesRequest = new GetAliasesRequest().indices(index, "non_existent_index");
GetAliasesResponse getAliasesResponse = execute(
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
index 47add92ecaccd..01fdd489aa7d8 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RankEvalIT.java
@@ -121,7 +121,7 @@ public void testRankEvalRequest() throws IOException {
}
// now try this when test2 is closed
- client().performRequest(new Request("POST", "index2/_close"));
+ client().performRequest(new Request("POST", "/index2/_close"));
rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS));
response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync);
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
index 084d754275dec..38f5d9302440e 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
@@ -1399,7 +1399,7 @@ public void testRenderSearchTemplate() throws Exception {
// Verify that the resulting REST request looks as expected.
Request request = RequestConverters.searchTemplate(searchTemplateRequest);
- String endpoint = "_render/template";
+ String endpoint = "/_render/template";
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals(endpoint, request.getEndpoint());
@@ -1565,7 +1565,7 @@ public void testMultiTermVectors() throws IOException {
Request request = RequestConverters.mtermVectors(mtvRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
- assertEquals("_mtermvectors", request.getEndpoint());
+ assertEquals("/_mtermvectors", request.getEndpoint());
assertToXContentBody(mtvRequest, request.getEntity());
}
@@ -1585,7 +1585,7 @@ public void testMultiTermVectorsWithType() throws IOException {
Request request = RequestConverters.mtermVectors(mtvRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
- assertEquals("_mtermvectors", request.getEndpoint());
+ assertEquals("/_mtermvectors", request.getEndpoint());
assertToXContentBody(mtvRequest, request.getEntity());
}
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
index c1f1cbf1d0e91..d10dc3df43ee5 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java
@@ -727,7 +727,7 @@ public void testSearchWithSuggest() throws IOException {
}
public void testSearchWithWeirdScriptFields() throws Exception {
- Request doc = new Request("PUT", "test/_doc/1");
+ Request doc = new Request("PUT", "/test/_doc/1");
doc.setJsonEntity("{\"field\":\"value\"}");
client().performRequest(doc);
client().performRequest(new Request("POST", "/test/_refresh"));
@@ -774,7 +774,7 @@ public void testSearchWithWeirdScriptFields() throws Exception {
public void testSearchWithDerivedFields() throws Exception {
// Just testing DerivedField definition from SearchSourceBuilder derivedField()
// We are not testing the full functionality here
- Request doc = new Request("PUT", "test/_doc/1");
+ Request doc = new Request("PUT", "/test/_doc/1");
doc.setJsonEntity("{\"field\":\"value\"}");
client().performRequest(doc);
client().performRequest(new Request("POST", "/test/_refresh"));
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java
index bf0f70304168e..326dde54cfb61 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SearchDocumentationIT.java
@@ -998,7 +998,7 @@ public void onFailure(Exception e) {
protected void registerQueryScript(RestClient restClient) throws IOException {
// tag::register-script
- Request scriptRequest = new Request("POST", "_scripts/title_search");
+ Request scriptRequest = new Request("POST", "/_scripts/title_search");
scriptRequest.setJsonEntity(
"{" +
" \"script\": {" +
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
index d0015db044843..6949bc382bfe8 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
@@ -827,7 +827,7 @@ private void createTestIndex() throws IOException {
}
private void createTestSnapshots() throws IOException {
- Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repositoryName, snapshotName));
+ Request createSnapshot = new Request("put", String.format(Locale.ROOT, "/_snapshot/%s/%s", repositoryName, snapshotName));
createSnapshot.addParameter("wait_for_completion", "true");
createSnapshot.setJsonEntity("{\"indices\":\"" + indexName + "\"}");
Response response = highLevelClient().getLowLevelClient().performRequest(createSnapshot);
diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle
index 161b8008525b4..792b1ab57ddbc 100644
--- a/distribution/archives/build.gradle
+++ b/distribution/archives/build.gradle
@@ -39,11 +39,17 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla
with libFiles()
}
into('config') {
- dirMode 0750
- fileMode 0660
+ dirPermissions {
+ unix 0750
+ }
+ filePermissions {
+ unix 0660
+ }
with configFiles(distributionType, java)
from {
- dirMode 0750
+ dirPermissions {
+ unix 0750
+ }
jvmOptionsDir.getParent()
}
}
@@ -61,13 +67,17 @@ CopySpec archiveFiles(CopySpec modulesFiles, String distributionType, String pla
}
into('') {
from {
- dirMode 0755
+ dirPermissions {
+ unix 0755
+ }
logsDir.getParent()
}
}
into('') {
from {
- dirMode 0755
+ dirPermissions {
+ unix 0755
+ }
pluginsDir.getParent()
}
}
diff --git a/distribution/build.gradle b/distribution/build.gradle
index 35ca84ca66dba..36efe2e0d45e8 100644
--- a/distribution/build.gradle
+++ b/distribution/build.gradle
@@ -363,9 +363,9 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
if (it.relativePath.segments[-2] == 'bin' || ((platform == 'darwin-x64' || platform == 'darwin-arm64') && it.relativePath.segments[-2] == 'MacOS')) {
// bin files, wherever they are within modules (eg platform specific) should be executable
// and MacOS is an alternative to bin on macOS
- it.mode = 0755
+ it.permissions(perm -> perm.unix(0755))
} else {
- it.mode = 0644
+ it.permissions(perm -> perm.unix(0644))
}
}
def buildModules = buildModulesTaskProvider
@@ -413,7 +413,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
from '../src/bin'
exclude '*.exe'
exclude '*.bat'
- eachFile { it.setMode(0755) }
+ eachFile { it.permissions(perm -> perm.unix(0755)) }
MavenFilteringHack.filter(it, expansionsForDistribution(distributionType, java))
}
// windows files, only for zip
@@ -431,7 +431,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
}
// module provided bin files
with copySpec {
- eachFile { it.setMode(0755) }
+ eachFile { it.permissions(perm -> perm.unix(0755)) }
from project(':distribution').buildBin
if (distributionType != 'zip') {
exclude '*.bat'
@@ -473,7 +473,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
}
eachFile { FileCopyDetails details ->
if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') {
- details.mode = 0755
+ details.permissions(perm -> perm.unix(0755))
}
if (details.name == 'src.zip') {
details.exclude()
@@ -501,7 +501,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
}
eachFile { FileCopyDetails details ->
if (details.relativePath.segments[-2] == 'bin' || details.relativePath.segments[-1] == 'jspawnhelper') {
- details.mode = 0755
+ details.permissions(perm -> perm.unix(0755))
}
}
}
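Throughout these copy specs the deprecated dirMode/fileMode/setMode calls are replaced with the permissions API introduced in Gradle 8.3. A hypothetical Java rendering of the same idiom (the Groovy blocks above call the identical CopySpec/FileCopyDetails methods):

    import org.gradle.api.file.CopySpec;

    final class CopyPermissions {
        // Mirror of the `eachFile { it.permissions(perm -> perm.unix(0755)) }` pattern above.
        static void configure(CopySpec spec) {
            spec.eachFile(details -> {
                String[] segments = details.getRelativePath().getSegments();
                // Same rule as the distribution build: files under a bin/ directory are executable.
                boolean inBin = segments.length > 1 && "bin".equals(segments[segments.length - 2]);
                details.permissions(perm -> perm.unix(inBin ? 0755 : 0644));
            });
        }
    }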
diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle
index 211b3bd55da60..621620eef9d71 100644
--- a/distribution/packages/build.gradle
+++ b/distribution/packages/build.gradle
@@ -160,7 +160,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
}
from(rootProject.projectDir) {
include 'README.md'
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
into('lib') {
with libFiles()
@@ -183,9 +185,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
directory('/' + segments[0..i].join('/'), 0755)
}
if (segments[-2] == 'bin' || segments[-1] == 'jspawnhelper') {
- fcp.mode = 0755
+ fcp.permissions(perm -> perm.unix(0755))
} else {
- fcp.mode = 0644
+ fcp.permissions(perm -> perm.unix(0644))
}
}
}
@@ -195,7 +197,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
if (type == 'deb') {
into("/usr/share/doc/${packageName}") {
from "${packagingFiles}/copyright"
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
} else {
assert type == 'rpm'
@@ -204,7 +208,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
include 'APACHE-LICENSE-2.0.txt'
rename { 'LICENSE.txt' }
}
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
}
@@ -213,7 +219,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
configurationFile '/etc/opensearch/jvm.options'
configurationFile '/etc/opensearch/log4j2.properties'
from("${packagingFiles}") {
- dirMode 0750
+ dirPermissions {
+ unix 0750
+ }
into('/etc')
permissionGroup 'opensearch'
includeEmptyDirs true
@@ -223,8 +231,12 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
}
from("${packagingFiles}/etc/opensearch") {
into('/etc/opensearch')
- dirMode 0750
- fileMode 0660
+ dirPermissions {
+ unix 0750
+ }
+ filePermissions {
+ unix 0660
+ }
permissionGroup 'opensearch'
includeEmptyDirs true
createDirectoryEntry true
@@ -235,34 +247,46 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
into(new File(envFile).getParent()) {
fileType CONFIG | NOREPLACE
permissionGroup 'opensearch'
- fileMode 0660
+ filePermissions {
+ unix 0660
+ }
from "${packagingFiles}/env/opensearch"
}
// ========= systemd =========
into('/usr/lib/tmpfiles.d') {
from "${packagingFiles}/systemd/opensearch.conf"
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
into('/usr/lib/systemd/system') {
fileType CONFIG | NOREPLACE
from "${packagingFiles}/systemd/opensearch.service"
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
into('/usr/lib/sysctl.d') {
fileType CONFIG | NOREPLACE
from "${packagingFiles}/systemd/sysctl/opensearch.conf"
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
into('/usr/share/opensearch/bin') {
from "${packagingFiles}/systemd/systemd-entrypoint"
- fileMode 0755
+ filePermissions {
+ unix 0755
+ }
}
// ========= sysV init =========
configurationFile '/etc/init.d/opensearch'
into('/etc/init.d') {
- fileMode 0750
+ filePermissions {
+ unix 0750
+ }
fileType CONFIG | NOREPLACE
from "${packagingFiles}/init.d/opensearch"
}
@@ -278,7 +302,9 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) {
createDirectoryEntry true
user u
permissionGroup g
- dirMode mode
+ dirPermissions {
+ unix mode
+ }
}
}
copyEmptyDir('/var/log/opensearch', 'opensearch', 'opensearch', 0750)
@@ -341,7 +367,9 @@ Closure commonDebConfig(boolean jdk, String architecture) {
into('/usr/share/lintian/overrides') {
from('src/deb/lintian/opensearch')
- fileMode 0644
+ filePermissions {
+ unix 0644
+ }
}
}
}
diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml
index 10bab9b3fce92..4115601f62ada 100644
--- a/distribution/src/config/opensearch.yml
+++ b/distribution/src/config/opensearch.yml
@@ -125,3 +125,7 @@ ${path.logs}
# Gates the functionality of enabling Opensearch to use pluggable caches with respective store names via setting.
#
#opensearch.experimental.feature.pluggable.caching.enabled: false
+#
+# Gates the functionality of star tree index, which improves the performance of search aggregations.
+#
+#opensearch.experimental.feature.composite_index.star_tree.enabled: true
diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
index 726c381db09f6..af7138569972a 100644
--- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
+++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java
@@ -105,13 +105,8 @@ private static String javaLocaleProviders() {
SPI setting is used to allow loading custom CalendarDataProvider
in jdk8 it has to be loaded from jre/lib/ext,
in jdk9+ it is already within ES project and on a classpath
-
- Due to internationalization enhancements in JDK 9 OpenSearch need to set the provider to COMPAT otherwise time/date
- parsing will break in an incompatible way for some date patterns and locales.
- //TODO COMPAT will be deprecated in at some point, see please https://bugs.openjdk.java.net/browse/JDK-8232906
- See also: documentation in server/org.opensearch.common.time.IsoCalendarDataProvider
*/
- return "-Djava.locale.providers=SPI,COMPAT";
+ return "-Djava.locale.providers=SPI,CLDR";
}
}
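The launcher now asks for CLDR locale data instead of COMPAT, which can change how some locale-sensitive text is formatted. A tiny hypothetical probe (not part of the launcher) for comparing the two settings side by side:

    import java.time.DayOfWeek;
    import java.time.format.TextStyle;
    import java.util.Locale;

    public class LocaleProviderProbe {
        public static void main(String[] args) {
            // Run once with -Djava.locale.providers=SPI,CLDR (the new default here)
            // and once with -Djava.locale.providers=SPI,COMPAT to compare the output.
            System.out.println("java.locale.providers = " + System.getProperty("java.locale.providers"));
            // Locale-sensitive text is resolved through the active provider chain and may differ.
            System.out.println(DayOfWeek.MONDAY.getDisplayName(TextStyle.SHORT, Locale.JAPANESE));
        }
    }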
diff --git a/gradle/ide.gradle b/gradle/ide.gradle
index ea353f8d92bdd..e266d9add172d 100644
--- a/gradle/ide.gradle
+++ b/gradle/ide.gradle
@@ -81,7 +81,7 @@ if (System.getProperty('idea.active') == 'true') {
}
runConfigurations {
defaults(JUnit) {
- vmParameters = '-ea -Djava.locale.providers=SPI,COMPAT'
+ vmParameters = '-ea -Djava.locale.providers=SPI,CLDR'
if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) {
vmParameters += ' -Djava.security.manager=allow'
}
diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties
index 9b0d73222260e..f5bdef81deb70 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/gradle/wrapper/gradle-wrapper.properties
@@ -11,7 +11,7 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-8.7-all.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-8.8-all.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionSha256Sum=194717442575a6f96e1c1befa2c30e9a4fc90f701d7aee33eb879b79e7ff05c0
+distributionSha256Sum=f8b4f4772d302c8ff580bc40d0f56e715de69b163546944f787c87abf209c961
diff --git a/gradlew.bat b/gradlew.bat
index 6689b85beecde..7101f8e4676fc 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -43,11 +43,11 @@ set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
-echo.
-echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
goto fail
@@ -57,11 +57,11 @@ set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
-echo.
-echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
-echo.
-echo Please set the JAVA_HOME variable in your environment to match the
-echo location of your Java installation.
+echo. 1>&2
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2
+echo. 1>&2
+echo Please set the JAVA_HOME variable in your environment to match the 1>&2
+echo location of your Java installation. 1>&2
goto fail
diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
index 8d8a4c7895339..52162e3df0c1c 100644
--- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
+++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java
@@ -473,4 +473,17 @@ public void testPublicApiWithProtectedInterface() {
assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
}
+
+ /**
+ * The constructor arguments have relaxed semantics at the moment: they may be left un-annotated or annotated as {@link InternalApi}
+ */
+ public void testPublicApiConstructorAnnotatedInternalApi() {
+ final CompilerResult result = compile("PublicApiConstructorAnnotatedInternalApi.java", "NotAnnotated.java");
+ assertThat(result, instanceOf(Failure.class));
+
+ final Failure failure = (Failure) result;
+ assertThat(failure.diagnotics(), hasSize(2));
+
+ assertThat(failure.diagnotics(), not(hasItem(matching(Diagnostic.Kind.ERROR))));
+ }
}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java
index 9996ba8b736aa..b0b542e127285 100644
--- a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/InternalApiAnnotated.java
@@ -8,9 +8,9 @@
package org.opensearch.common.annotation.processor;
-import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.annotation.InternalApi;
-@PublicApi(since = "1.0.0")
+@InternalApi
public class InternalApiAnnotated {
}
diff --git a/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java
new file mode 100644
index 0000000000000..d355a6b770391
--- /dev/null
+++ b/libs/common/src/test/resources/org/opensearch/common/annotation/processor/PublicApiConstructorAnnotatedInternalApi.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.annotation.processor;
+
+import org.opensearch.common.annotation.InternalApi;
+import org.opensearch.common.annotation.PublicApi;
+
+@PublicApi(since = "1.0.0")
+public class PublicApiConstructorAnnotatedInternalApi {
+ /**
+ * The constructors have relaxed semantics at the moment: they may be left un-annotated or annotated as {@link InternalApi}
+ */
+ @InternalApi
+ public PublicApiConstructorAnnotatedInternalApi(NotAnnotated arg) {}
+}
diff --git a/libs/core/licenses/lucene-core-9.11.0.jar.sha1 b/libs/core/licenses/lucene-core-9.11.0.jar.sha1
deleted file mode 100644
index b0d38c4165581..0000000000000
--- a/libs/core/licenses/lucene-core-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2e487755a6814b2a1bc770c26569dcba86873dcf
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 b/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..e3fd1708ea428
--- /dev/null
+++ b/libs/core/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+51ff4940eb1024184bbaa5dae39695d2392c5bab
\ No newline at end of file
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 3f680b4ab8e05..b647a92d6708a 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -105,7 +105,9 @@ public class Version implements Comparable, ToXContentFragment {
public static final Version V_2_14_0 = new Version(2140099, org.apache.lucene.util.Version.LUCENE_9_10_0);
public static final Version V_2_14_1 = new Version(2140199, org.apache.lucene.util.Version.LUCENE_9_10_0);
public static final Version V_2_15_0 = new Version(2150099, org.apache.lucene.util.Version.LUCENE_9_10_0);
- public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_11_0);
+ public static final Version V_2_15_1 = new Version(2150199, org.apache.lucene.util.Version.LUCENE_9_10_0);
+ public static final Version V_2_16_0 = new Version(2160099, org.apache.lucene.util.Version.LUCENE_9_11_1);
+ public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_12_0);
public static final Version CURRENT = V_3_0_0;
public static Version fromId(int id) {
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java b/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
index 5389538a8c7dd..b8da9787165f8 100644
--- a/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
+++ b/libs/core/src/main/java/org/opensearch/core/xcontent/filtering/FilterPath.java
@@ -46,7 +46,6 @@
public class FilterPath {
static final FilterPath EMPTY = new FilterPath();
-
private final String filter;
private final String segment;
private final FilterPath next;
@@ -99,32 +98,29 @@ public static FilterPath[] compile(Set filters) {
List<FilterPath> paths = new ArrayList<>();
for (String filter : filters) {
- if (filter != null) {
+ if (filter != null && !filter.isEmpty()) {
filter = filter.trim();
if (filter.length() > 0) {
- paths.add(parse(filter, filter));
+ paths.add(parse(filter));
}
}
}
return paths.toArray(new FilterPath[0]);
}
- private static FilterPath parse(final String filter, final String segment) {
- int end = segment.length();
-
- for (int i = 0; i < end;) {
- char c = segment.charAt(i);
+ private static FilterPath parse(final String filter) {
+ // Split the filter into segments using a regex
+ // that avoids splitting escaped dots.
+ String[] segments = filter.split("(?<!\\\\)\\.");
+ FilterPath next = EMPTY;
+ for (int i = segments.length - 1; i >= 0; i--) {
+ // Replace escaped dots with actual dots in the current segment.
+ String segment = segments[i].replaceAll("\\\\.", ".");
+ next = new FilterPath(filter, segment, next);
}
- return new FilterPath(filter, segment.replaceAll("\\\\.", "."), EMPTY);
+
+ return next;
}
@Override
diff --git a/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java b/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
index 0c5a17b70a956..d3191609f6119 100644
--- a/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
+++ b/libs/core/src/test/java/org/opensearch/core/xcontent/filtering/FilterPathTests.java
@@ -35,6 +35,7 @@
import org.opensearch.common.util.set.Sets;
import org.opensearch.test.OpenSearchTestCase;
+import java.util.HashSet;
import java.util.Set;
import static java.util.Collections.singleton;
@@ -369,4 +370,20 @@ public void testMultipleFilterPaths() {
assertThat(filterPath.getSegment(), is(emptyString()));
assertSame(filterPath, FilterPath.EMPTY);
}
+
+ public void testCompileWithEmptyString() {
+ Set filters = new HashSet<>();
+ filters.add("");
+ FilterPath[] filterPaths = FilterPath.compile(filters);
+ assertNotNull(filterPaths);
+ assertEquals(0, filterPaths.length);
+ }
+
+ public void testCompileWithNull() {
+ Set filters = new HashSet<>();
+ filters.add(null);
+ FilterPath[] filterPaths = FilterPath.compile(filters);
+ assertNotNull(filterPaths);
+ assertEquals(0, filterPaths.length);
+ }
}
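The rewritten parse(...) splits the filter on unescaped dots and builds the FilterPath chain from the last segment backwards, instead of recursing character by character. A self-contained sketch of just the split-and-unescape step, assuming the negative-lookbehind pattern used above:

    import java.util.Arrays;

    public class EscapedDotSplitDemo {
        public static void main(String[] args) {
            String filter = "a.b\\.c";                                 // "b\.c" contains an escaped dot
            String[] segments = filter.split("(?<!\\\\)\\.");          // split only on dots not preceded by a backslash
            System.out.println(Arrays.toString(segments));             // [a, b\.c]
            System.out.println(segments[1].replaceAll("\\\\.", "."));  // b.c  (escape removed per segment)
        }
    }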
diff --git a/libs/geo/src/main/java/org/opensearch/geometry/utils/WellKnownText.java b/libs/geo/src/main/java/org/opensearch/geometry/utils/WellKnownText.java
index ed1d63e6d4fef..8ad135b8bc1ca 100644
--- a/libs/geo/src/main/java/org/opensearch/geometry/utils/WellKnownText.java
+++ b/libs/geo/src/main/java/org/opensearch/geometry/utils/WellKnownText.java
@@ -49,8 +49,10 @@
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.text.ParseException;
+import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.Deque;
import java.util.List;
import java.util.Locale;
@@ -67,6 +69,7 @@ public class WellKnownText {
public static final String RPAREN = ")";
public static final String COMMA = ",";
public static final String NAN = "NaN";
+ public static final int MAX_DEPTH_OF_GEO_COLLECTION = 1000;
private final String NUMBER = "";
private final String EOF = "END-OF-STREAM";
@@ -278,6 +281,16 @@ public Geometry fromWKT(String wkt) throws IOException, ParseException {
*/
private Geometry parseGeometry(StreamTokenizer stream) throws IOException, ParseException {
final String type = nextWord(stream).toLowerCase(Locale.ROOT);
+ switch (type) {
+ case "geometrycollection":
+ return parseGeometryCollection(stream);
+ default:
+ return parseSimpleGeometry(stream, type);
+ }
+ }
+
+ private Geometry parseSimpleGeometry(StreamTokenizer stream, String type) throws IOException, ParseException {
+ assert "geometrycollection".equals(type) == false;
switch (type) {
case "point":
return parsePoint(stream);
@@ -294,7 +307,7 @@ private Geometry parseGeometry(StreamTokenizer stream) throws IOException, Parse
case "bbox":
return parseBBox(stream);
case "geometrycollection":
- return parseGeometryCollection(stream);
+ throw new IllegalStateException("Unexpected type: geometrycollection");
case "circle": // Not part of the standard, but we need it for internal serialization
return parseCircle(stream);
}
@@ -305,12 +318,56 @@ private GeometryCollection parseGeometryCollection(StreamTokenizer str
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
return GeometryCollection.EMPTY;
}
- List<Geometry> shapes = new ArrayList<>();
- shapes.add(parseGeometry(stream));
- while (nextCloserOrComma(stream).equals(COMMA)) {
- shapes.add(parseGeometry(stream));
+
+ List<Geometry> topLevelShapes = new ArrayList<>();
+ Deque<List<Geometry>> deque = new ArrayDeque<>();
+ deque.push(topLevelShapes);
+ boolean isFirstIteration = true;
+ List<Geometry> currentLevelShapes = null;
+ while (!deque.isEmpty()) {
+ List<Geometry> previousShapes = deque.pop();
+ if (currentLevelShapes != null) {
+ previousShapes.add(new GeometryCollection<>(currentLevelShapes));
+ }
+ currentLevelShapes = previousShapes;
+
+ if (isFirstIteration == true) {
+ isFirstIteration = false;
+ } else {
+ if (nextCloserOrComma(stream).equals(COMMA) == false) {
+ // Done with current level, continue with parent level
+ continue;
+ }
+ }
+ while (true) {
+ final String type = nextWord(stream).toLowerCase(Locale.ROOT);
+ if (type.equals("geometrycollection")) {
+ if (nextEmptyOrOpen(stream).equals(EMPTY) == false) {
+ // GEOMETRYCOLLECTION() -> 1 depth, GEOMETRYCOLLECTION(GEOMETRYCOLLECTION()) -> 2 depth
+ // When parsing the top level geometry collection, the queue size is zero.
+ // When max depth is 1, we don't want to push any sub geometry collection in the queue.
+ // Therefore, we subtract 2 from max depth.
+ if (deque.size() >= MAX_DEPTH_OF_GEO_COLLECTION - 2) {
+ throw new IllegalArgumentException(
+ "a geometry collection with a depth greater than " + MAX_DEPTH_OF_GEO_COLLECTION + " is not supported"
+ );
+ }
+ deque.push(currentLevelShapes);
+ currentLevelShapes = new ArrayList<>();
+ continue;
+ }
+ currentLevelShapes.add(GeometryCollection.EMPTY);
+ } else {
+ currentLevelShapes.add(parseSimpleGeometry(stream, type));
+ }
+
+ if (nextCloserOrComma(stream).equals(COMMA) == false) {
+ break;
+ }
+ }
}
- return new GeometryCollection<>(shapes);
+
+ return new GeometryCollection<>(topLevelShapes);
}
private Point parsePoint(StreamTokenizer stream) throws IOException, ParseException {
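parseGeometryCollection now walks nested collections with an explicit deque rather than recursion, so the nesting depth can be capped (MAX_DEPTH_OF_GEO_COLLECTION) before anything is pushed. A reduced, hypothetical sketch of that depth-guard idea on a plain nested-parentheses input:

    import java.util.ArrayDeque;
    import java.util.Deque;

    public class NestingDepthGuard {
        static final int MAX_DEPTH = 1000;

        // The deque stands in for the call stack, so the depth check happens up front
        // instead of surfacing later as a StackOverflowError.
        static int maxDepth(String input) {
            Deque<Integer> stack = new ArrayDeque<>();
            int deepest = 0;
            for (char c : input.toCharArray()) {
                if (c == '(') {
                    if (stack.size() >= MAX_DEPTH) {
                        throw new IllegalArgumentException("nesting deeper than " + MAX_DEPTH + " is not supported");
                    }
                    stack.push(stack.size());
                    deepest = Math.max(deepest, stack.size());
                } else if (c == ')') {
                    stack.pop();
                }
            }
            return deepest;
        }
    }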
diff --git a/libs/geo/src/test/java/org/opensearch/geometry/GeometryCollectionTests.java b/libs/geo/src/test/java/org/opensearch/geometry/GeometryCollectionTests.java
index 631b6456a77da..cd8bb8f585966 100644
--- a/libs/geo/src/test/java/org/opensearch/geometry/GeometryCollectionTests.java
+++ b/libs/geo/src/test/java/org/opensearch/geometry/GeometryCollectionTests.java
@@ -62,6 +62,11 @@ public void testBasicSerialization() throws IOException, ParseException {
assertEquals("GEOMETRYCOLLECTION EMPTY", wkt.toWKT(GeometryCollection.EMPTY));
assertEquals(GeometryCollection.EMPTY, wkt.fromWKT("GEOMETRYCOLLECTION EMPTY)"));
+
+ assertEquals(
+ new GeometryCollection(Arrays.asList(GeometryCollection.EMPTY)),
+ wkt.fromWKT("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY)")
+ );
}
@SuppressWarnings("ConstantConditions")
@@ -86,4 +91,29 @@ public void testInitValidation() {
new StandardValidator(true).validate(new GeometryCollection(Collections.singletonList(new Point(20, 10, 30))));
}
+
+ public void testDeeplyNestedGeometryCollection() throws IOException, ParseException {
+ WellKnownText wkt = new WellKnownText(true, new GeographyValidator(true));
+ StringBuilder validGeometryCollectionHead = new StringBuilder("GEOMETRYCOLLECTION");
+ StringBuilder validGeometryCollectionTail = new StringBuilder(" EMPTY");
+ for (int i = 0; i < WellKnownText.MAX_DEPTH_OF_GEO_COLLECTION - 1; i++) {
+ validGeometryCollectionHead.append(" (GEOMETRYCOLLECTION");
+ validGeometryCollectionTail.append(")");
+ }
+ // Expect no exception
+ wkt.fromWKT(validGeometryCollectionHead.append(validGeometryCollectionTail).toString());
+
+ StringBuilder invalidGeometryCollectionHead = new StringBuilder("GEOMETRYCOLLECTION");
+ StringBuilder invalidGeometryCollectionTail = new StringBuilder(" EMPTY");
+ for (int i = 0; i < WellKnownText.MAX_DEPTH_OF_GEO_COLLECTION; i++) {
+ invalidGeometryCollectionHead.append(" (GEOMETRYCOLLECTION");
+ invalidGeometryCollectionTail.append(")");
+ }
+
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> wkt.fromWKT(invalidGeometryCollectionHead.append(invalidGeometryCollectionTail).toString())
+ );
+ assertEquals("a geometry collection with a depth greater than 1000 is not supported", ex.getMessage());
+ }
}
diff --git a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java
index 537caccbac652..783b6083e9226 100644
--- a/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java
+++ b/modules/cache-common/src/internalClusterTest/java/org/opensearch/cache/common/tier/TieredSpilloverCacheStatsIT.java
@@ -10,6 +10,7 @@
import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.client.Client;
@@ -20,6 +21,7 @@
import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.IndexSettings;
import org.opensearch.index.cache.request.RequestCacheStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.indices.IndicesRequestCache;
@@ -351,11 +353,15 @@ private void startIndex(Client client, String indexName) throws InterruptedExcep
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
.build()
)
.get()
);
indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello"));
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(indexName).setFlush(true).get();
ensureSearchable(indexName);
}
diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
index 63cdbca101f2a..f69c56808b2a1 100644
--- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
+++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java
@@ -8,6 +8,8 @@
package org.opensearch.cache.common.tier;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.opensearch.cache.common.policy.TookTimePolicy;
import org.opensearch.common.annotation.ExperimentalApi;
import org.opensearch.common.cache.CacheType;
@@ -35,9 +37,13 @@
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.ToLongBiFunction;
@@ -61,6 +67,7 @@ public class TieredSpilloverCache implements ICache {
// Used to avoid caching stale entries in lower tiers.
private static final List<RemovalReason> SPILLOVER_REMOVAL_REASONS = List.of(RemovalReason.EVICTED, RemovalReason.CAPACITY);
+ private static final Logger logger = LogManager.getLogger(TieredSpilloverCache.class);
private final ICache<K, V> diskCache;
private final ICache<K, V> onHeapCache;
@@ -86,6 +93,12 @@ public class TieredSpilloverCache implements ICache {
private final Map<ICache<K, V>, TierInfo> caches;
private final List<Predicate<V>> policies;
+ /**
+ * This map is used to handle concurrent requests for the same key in computeIfAbsent() to ensure we load the value
+ * only once.
+ */
+ Map<ICacheKey<K>, CompletableFuture<Tuple<ICacheKey<K>, V>>> completableFutureMap = new ConcurrentHashMap<>();
+
TieredSpilloverCache(Builder builder) {
Objects.requireNonNull(builder.onHeapCacheFactory, "onHeap cache builder can't be null");
Objects.requireNonNull(builder.diskCacheFactory, "disk cache builder can't be null");
@@ -182,7 +195,16 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V>
// and it only has to be loaded one time, we should report one miss and the rest hits. But, if we do stats in
// getValueFromTieredCache(),
// we will see all misses. Instead, handle stats in computeIfAbsent().
- Tuple<V, String> cacheValueTuple = getValueFromTieredCache(false).apply(key);
+ Tuple<V, String> cacheValueTuple;
+ CompletableFuture<Tuple<ICacheKey<K>, V>> future = null;
+ try (ReleasableLock ignore = readLock.acquire()) {
+ cacheValueTuple = getValueFromTieredCache(false).apply(key);
+ if (cacheValueTuple == null) {
+ // Only one of the threads will succeed putting a future into map for the same key.
+ // Rest will fetch existing future and wait on that to complete.
+ future = completableFutureMap.putIfAbsent(key, new CompletableFuture<>());
+ }
+ }
List<String> heapDimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, TIER_DIMENSION_VALUE_ON_HEAP);
List<String> diskDimensionValues = statsHolder.getDimensionsWithTierValue(key.dimensions, TIER_DIMENSION_VALUE_DISK);
@@ -190,10 +212,7 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V>
// Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside.
// This is needed as there can be many requests for the same key at the same time and we only want to load
// the value once.
- V value = null;
- try (ReleasableLock ignore = writeLock.acquire()) {
- value = onHeapCache.computeIfAbsent(key, loader);
- }
+ V value = compute(key, loader, future);
// Handle stats
if (loader.isLoaded()) {
// The value was just computed and added to the cache by this thread. Register a miss for the heap cache, and the disk cache
@@ -222,6 +241,55 @@ public V computeIfAbsent(ICacheKey key, LoadAwareCacheLoader, V>
return cacheValueTuple.v1();
}
+ private V compute(ICacheKey<K> key, LoadAwareCacheLoader<ICacheKey<K>, V> loader, CompletableFuture<Tuple<ICacheKey<K>, V>> future)
+ throws Exception {
+ // Handler to handle results post processing. Takes a tuple or exception as an input and returns
+ // the value. Also before returning value, puts the value in cache.
+ BiFunction<Tuple<ICacheKey<K>, V>, Throwable, Void> handler = (pair, ex) -> {
+ if (pair != null) {
+ try (ReleasableLock ignore = writeLock.acquire()) {
+ onHeapCache.put(pair.v1(), pair.v2());
+ } catch (Exception e) {
+ // TODO: Catch specific exceptions to know whether this resulted from cache or underlying removal
+ // listeners/stats. Needs better exception handling at underlying layers. For now swallowing
+ // exception.
+ logger.warn("Exception occurred while putting item onto heap cache", e);
+ }
+ } else {
+ if (ex != null) {
+ logger.warn("Exception occurred while trying to compute the value", ex);
+ }
+ }
+ completableFutureMap.remove(key);// Remove key from map as not needed anymore.
+ return null;
+ };
+ V value = null;
+ if (future == null) {
+ future = completableFutureMap.get(key);
+ future.handle(handler);
+ try {
+ value = loader.load(key);
+ } catch (Exception ex) {
+ future.completeExceptionally(ex);
+ throw new ExecutionException(ex);
+ }
+ if (value == null) {
+ NullPointerException npe = new NullPointerException("Loader returned a null value");
+ future.completeExceptionally(npe);
+ throw new ExecutionException(npe);
+ } else {
+ future.complete(new Tuple<>(key, value));
+ }
+ } else {
+ try {
+ value = future.get().v2();
+ } catch (InterruptedException ex) {
+ throw new IllegalStateException(ex);
+ }
+ }
+ return value;
+ }
+
@Override
public void invalidate(ICacheKey key) {
// We are trying to invalidate the key from all caches though it would be present in only of them.
@@ -328,12 +396,22 @@ void handleRemovalFromHeapTier(RemovalNotification, V> notification
ICacheKey<K> key = notification.getKey();
boolean wasEvicted = SPILLOVER_REMOVAL_REASONS.contains(notification.getRemovalReason());
boolean countEvictionTowardsTotal = false; // Don't count this eviction towards the cache's total if it ends up in the disk tier
- if (caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue())) {
+ boolean exceptionOccurredOnDiskCachePut = false;
+ boolean canCacheOnDisk = caches.get(diskCache).isEnabled() && wasEvicted && evaluatePolicies(notification.getValue());
+ if (canCacheOnDisk) {
try (ReleasableLock ignore = writeLock.acquire()) {
diskCache.put(key, notification.getValue()); // spill over to the disk tier and increment its stats
+ } catch (Exception ex) {
+ // TODO: Catch specific exceptions. Needs better exception handling. We are just swallowing exception
+ // in this case as it shouldn't cause upstream request to fail.
+ logger.warn("Exception occurred while putting item to disk cache", ex);
+ exceptionOccurredOnDiskCachePut = true;
}
- updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue());
- } else {
+ if (!exceptionOccurredOnDiskCachePut) {
+ updateStatsOnPut(TIER_DIMENSION_VALUE_DISK, key, notification.getValue());
+ }
+ }
+ if (!canCacheOnDisk || exceptionOccurredOnDiskCachePut) {
// If the value is not going to the disk cache, send this notification to the TSC's removal listener
// as the value is leaving the TSC entirely
removalListener.onRemoval(notification);
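computeIfAbsent now deduplicates concurrent loads per key: the first thread to putIfAbsent a CompletableFuture runs the loader and completes it, every other thread for that key waits on the same future, and the entry is removed once the value (or the failure) has been published. A stripped-down, hypothetical sketch of that single-flight pattern outside the cache:

    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    public class SingleFlightLoader<K, V> {
        private final Map<K, CompletableFuture<V>> inFlight = new ConcurrentHashMap<>();

        // Only one caller per key runs the loader; concurrent callers block on the same future.
        public V load(K key, Function<K, V> loader) throws Exception {
            CompletableFuture<V> created = new CompletableFuture<>();
            CompletableFuture<V> existing = inFlight.putIfAbsent(key, created);
            if (existing != null) {
                return existing.get();            // lost the race: wait for the winner's result
            }
            try {
                V value = loader.apply(key);
                created.complete(value);
                return value;
            } catch (Exception e) {
                created.completeExceptionally(e); // propagate the failure to the waiters, as the cache does
                throw e;
            } finally {
                inFlight.remove(key);             // the entry is only needed while the load is in flight
            }
        }
    }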
diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
index 54b15f236a418..c6440a1e1797f 100644
--- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
+++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java
@@ -44,8 +44,12 @@
import java.util.UUID;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.Predicate;
@@ -56,6 +60,10 @@
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
public class TieredSpilloverCacheTests extends OpenSearchTestCase {
static final List dimensionNames = List.of("dim1", "dim2", "dim3");
@@ -408,6 +416,7 @@ public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception {
assertEquals(onHeapCacheHit, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP));
assertEquals(cacheMiss + numOfItems1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK));
assertEquals(diskCacheHit, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK));
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
}
public void testComputeIfAbsentWithEvictionsFromTieredCache() throws Exception {
@@ -751,7 +760,7 @@ public void testInvalidateAll() throws Exception {
}
public void testComputeIfAbsentConcurrently() throws Exception {
- int onHeapCacheSize = randomIntBetween(100, 300);
+ int onHeapCacheSize = randomIntBetween(500, 700);
int diskCacheSize = randomIntBetween(200, 400);
int keyValueSize = 50;
@@ -773,7 +782,7 @@ public void testComputeIfAbsentConcurrently() throws Exception {
0
);
- int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1);
+ int numberOfSameKeys = randomIntBetween(400, onHeapCacheSize - 1);
ICacheKey key = getICacheKey(UUID.randomUUID().toString());
String value = UUID.randomUUID().toString();
@@ -802,7 +811,7 @@ public String load(ICacheKey key) {
};
loadAwareCacheLoaderList.add(loadAwareCacheLoader);
phaser.arriveAndAwaitAdvance();
- tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader);
+ assertEquals(value, tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader));
} catch (Exception e) {
throw new RuntimeException(e);
}
@@ -811,7 +820,7 @@ public String load(ICacheKey key) {
threads[i].start();
}
phaser.arriveAndAwaitAdvance();
- countDownLatch.await(); // Wait for rest of tasks to be cancelled.
+ countDownLatch.await();
int numberOfTimesKeyLoaded = 0;
assertEquals(numberOfSameKeys, loadAwareCacheLoaderList.size());
for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) {
@@ -824,6 +833,215 @@ public String load(ICacheKey key) {
// We should see only one heap miss, and the rest hits
assertEquals(1, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP));
assertEquals(numberOfSameKeys - 1, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP));
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
+ }
+
+ public void testComputIfAbsentConcurrentlyWithMultipleKeys() throws Exception {
+ int onHeapCacheSize = randomIntBetween(300, 500);
+ int diskCacheSize = randomIntBetween(600, 700);
+ int keyValueSize = 50;
+
+ MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>();
+ Settings settings = Settings.builder()
+ .put(
+ OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+ .getKey(),
+ onHeapCacheSize * keyValueSize + "b"
+ )
+ .build();
+
+ TieredSpilloverCache tieredSpilloverCache = initializeTieredSpilloverCache(
+ keyValueSize,
+ diskCacheSize,
+ removalListener,
+ settings,
+ 0
+ );
+
+ int iterations = 10;
+ int numberOfKeys = 20;
+ List<ICacheKey<String>> iCacheKeyList = new ArrayList<>();
+ for (int i = 0; i < numberOfKeys; i++) {
+ ICacheKey<String> key = getICacheKey(UUID.randomUUID().toString());
+ iCacheKeyList.add(key);
+ }
+ ExecutorService executorService = Executors.newFixedThreadPool(8);
+ CountDownLatch countDownLatch = new CountDownLatch(iterations * numberOfKeys); // To wait for all threads to finish.
+
+ List<LoadAwareCacheLoader<ICacheKey<String>, String>> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>();
+ for (int j = 0; j < numberOfKeys; j++) {
+ int finalJ = j;
+ for (int i = 0; i < iterations; i++) {
+ executorService.submit(() -> {
+ try {
+ LoadAwareCacheLoader<ICacheKey<String>, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() {
+ boolean isLoaded = false;
+
+ @Override
+ public boolean isLoaded() {
+ return isLoaded;
+ }
+
+ @Override
+ public String load(ICacheKey<String> key) {
+ isLoaded = true;
+ return iCacheKeyList.get(finalJ).key;
+ }
+ };
+ loadAwareCacheLoaderList.add(loadAwareCacheLoader);
+ tieredSpilloverCache.computeIfAbsent(iCacheKeyList.get(finalJ), loadAwareCacheLoader);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ countDownLatch.countDown();
+ }
+ });
+ }
+ }
+ countDownLatch.await();
+ int numberOfTimesKeyLoaded = 0;
+ assertEquals(iterations * numberOfKeys, loadAwareCacheLoaderList.size());
+ for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) {
+ LoadAwareCacheLoader<ICacheKey<String>, String> loader = loadAwareCacheLoaderList.get(i);
+ if (loader.isLoaded()) {
+ numberOfTimesKeyLoaded++;
+ }
+ }
+ assertEquals(numberOfKeys, numberOfTimesKeyLoaded); // Each key should be loaded only once.
+ // We should see one heap miss per key, and the rest hits
+ assertEquals(numberOfKeys, getMissesForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP));
+ assertEquals((iterations * numberOfKeys) - numberOfKeys, getHitsForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_ON_HEAP));
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
+ executorService.shutdownNow();
+ }
+
+ public void testComputeIfAbsentConcurrentlyAndThrowsException() throws Exception {
+ LoadAwareCacheLoader<ICacheKey<String>, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() {
+ boolean isLoaded = false;
+
+ @Override
+ public boolean isLoaded() {
+ return isLoaded;
+ }
+
+ @Override
+ public String load(ICacheKey<String> key) {
+ throw new RuntimeException("Testing");
+ }
+ };
+ verifyComputeIfAbsentThrowsException(RuntimeException.class, loadAwareCacheLoader, "Testing");
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testComputeIfAbsentWithOnHeapCacheThrowingExceptionOnPut() throws Exception {
+ int onHeapCacheSize = randomIntBetween(100, 300);
+ int diskCacheSize = randomIntBetween(200, 400);
+ int keyValueSize = 50;
+
+ MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+ Settings settings = Settings.builder()
+ .put(
+ OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+ .getKey(),
+ onHeapCacheSize * keyValueSize + "b"
+ )
+ .build();
+ ICache.Factory onHeapCacheFactory = mock(OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.class);
+ ICache mockOnHeapCache = mock(ICache.class);
+ when(onHeapCacheFactory.create(any(), any(), any())).thenReturn(mockOnHeapCache);
+ doThrow(new RuntimeException("Testing")).when(mockOnHeapCache).put(any(), any());
+ CacheConfig<String, String> cacheConfig = getCacheConfig(keyValueSize, settings, removalListener);
+ ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(0, diskCacheSize, false);
+
+ TieredSpilloverCache<String, String> tieredSpilloverCache = getTieredSpilloverCache(
+ onHeapCacheFactory,
+ mockDiskCacheFactory,
+ cacheConfig,
+ null,
+ removalListener
+ );
+ String value = "";
+ value = tieredSpilloverCache.computeIfAbsent(getICacheKey("test"), new LoadAwareCacheLoader<>() {
+ @Override
+ public boolean isLoaded() {
+ return false;
+ }
+
+ @Override
+ public String load(ICacheKey<String> key) {
+ return "test";
+ }
+ });
+ assertEquals("test", value);
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
+ }
+
+ @SuppressWarnings({ "rawtypes", "unchecked" })
+ public void testComputeIfAbsentWithDiskCacheThrowingExceptionOnPut() throws Exception {
+ int onHeapCacheSize = 0;
+ int keyValueSize = 50;
+
+ MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+ Settings settings = Settings.builder()
+ .put(
+ OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+ .getKey(),
+ onHeapCacheSize * keyValueSize + "b"
+ )
+ .build();
+ ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory();
+ CacheConfig<String, String> cacheConfig = getCacheConfig(keyValueSize, settings, removalListener);
+ ICache.Factory mockDiskCacheFactory = mock(MockDiskCache.MockDiskCacheFactory.class);
+ ICache mockDiskCache = mock(ICache.class);
+ when(mockDiskCacheFactory.create(any(), any(), any())).thenReturn(mockDiskCache);
+ doThrow(new RuntimeException("Test")).when(mockDiskCache).put(any(), any());
+
+ TieredSpilloverCache<String, String> tieredSpilloverCache = getTieredSpilloverCache(
+ onHeapCacheFactory,
+ mockDiskCacheFactory,
+ cacheConfig,
+ null,
+ removalListener
+ );
+
+ String response = "";
+ response = tieredSpilloverCache.computeIfAbsent(getICacheKey("test"), new LoadAwareCacheLoader<>() {
+ @Override
+ public boolean isLoaded() {
+ return false;
+ }
+
+ @Override
+ public String load(ICacheKey<String> key) {
+ return "test";
+ }
+ });
+ ImmutableCacheStats diskStats = getStatsSnapshotForTier(tieredSpilloverCache, TIER_DIMENSION_VALUE_DISK);
+
+ assertEquals(0, diskStats.getSizeInBytes());
+ assertEquals(1, removalListener.evictionsMetric.count());
+ assertEquals("test", response);
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
+ }
+
+ public void testComputeIfAbsentConcurrentlyWithLoaderReturningNull() throws Exception {
+ LoadAwareCacheLoader<ICacheKey<String>, String> loadAwareCacheLoader = new LoadAwareCacheLoader<>() {
+ boolean isLoaded = false;
+
+ @Override
+ public boolean isLoaded() {
+ return isLoaded;
+ }
+
+ @Override
+ public String load(ICacheKey<String> key) {
+ return null;
+ }
+ };
+ verifyComputeIfAbsentThrowsException(NullPointerException.class, loadAwareCacheLoader, "Loader returned a null value");
}
public void testConcurrencyForEvictionFlowFromOnHeapToDiskTier() throws Exception {
@@ -1408,6 +1626,26 @@ public boolean isLoaded() {
};
}
+ private TieredSpilloverCache<String, String> getTieredSpilloverCache(
+ ICache.Factory onHeapCacheFactory,
+ ICache.Factory mockDiskCacheFactory,
+ CacheConfig<String, String> cacheConfig,
+ List<Predicate<String>> policies,
+ RemovalListener<ICacheKey<String>, String> removalListener
+ ) {
+ TieredSpilloverCache.Builder<String, String> builder = new TieredSpilloverCache.Builder<String, String>().setCacheType(
+ CacheType.INDICES_REQUEST_CACHE
+ )
+ .setRemovalListener(removalListener)
+ .setOnHeapCacheFactory(onHeapCacheFactory)
+ .setDiskCacheFactory(mockDiskCacheFactory)
+ .setCacheConfig(cacheConfig);
+ if (policies != null) {
+ builder.addPolicies(policies);
+ }
+ return builder.build();
+ }
+
private TieredSpilloverCache<String, String> initializeTieredSpilloverCache(
int keyValueSize,
int diskCacheSize,
@@ -1450,17 +1688,34 @@ private TieredSpilloverCache intializeTieredSpilloverCache(
.build();
ICache.Factory mockDiskCacheFactory = new MockDiskCache.MockDiskCacheFactory(diskDeliberateDelay, diskCacheSize, false);
- TieredSpilloverCache.Builder builder = new TieredSpilloverCache.Builder().setCacheType(
- CacheType.INDICES_REQUEST_CACHE
- )
+ return getTieredSpilloverCache(onHeapCacheFactory, mockDiskCacheFactory, cacheConfig, policies, removalListener);
+ }
+
+ private CacheConfig<String, String> getCacheConfig(
+ int keyValueSize,
+ Settings settings,
+ RemovalListener<ICacheKey<String>, String> removalListener
+ ) {
+ return new CacheConfig.Builder<String, String>().setKeyType(String.class)
+ .setValueType(String.class)
+ .setWeigher((k, v) -> keyValueSize)
+ .setSettings(settings)
+ .setDimensionNames(dimensionNames)
.setRemovalListener(removalListener)
- .setOnHeapCacheFactory(onHeapCacheFactory)
- .setDiskCacheFactory(mockDiskCacheFactory)
- .setCacheConfig(cacheConfig);
- if (policies != null) {
- builder.addPolicies(policies);
- }
- return builder.build();
+ .setKeySerializer(new StringSerializer())
+ .setValueSerializer(new StringSerializer())
+ .setSettings(
+ Settings.builder()
+ .put(
+ CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(),
+ TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME
+ )
+ .put(FeatureFlags.PLUGGABLE_CACHE, "true")
+ .put(settings)
+ .build()
+ )
+ .setClusterSettings(clusterSettings)
+ .build();
}
// Helper functions for extracting tier aggregated stats.
@@ -1501,6 +1756,66 @@ private ImmutableCacheStats getStatsSnapshotForTier(TieredSpilloverCache, ?> t
return snapshot;
}
+ private void verifyComputeIfAbsentThrowsException(
+ Class<? extends Exception> expectedException,
+ LoadAwareCacheLoader<ICacheKey<String>, String> loader,
+ String expectedExceptionMessage
+ ) throws InterruptedException {
+ int onHeapCacheSize = randomIntBetween(100, 300);
+ int diskCacheSize = randomIntBetween(200, 400);
+ int keyValueSize = 50;
+
+ MockCacheRemovalListener<String, String> removalListener = new MockCacheRemovalListener<>();
+ Settings settings = Settings.builder()
+ .put(
+ OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE)
+ .get(MAXIMUM_SIZE_IN_BYTES_KEY)
+ .getKey(),
+ onHeapCacheSize * keyValueSize + "b"
+ )
+ .build();
+
+ TieredSpilloverCache<String, String> tieredSpilloverCache = initializeTieredSpilloverCache(
+ keyValueSize,
+ diskCacheSize,
+ removalListener,
+ settings,
+ 0
+ );
+
+ int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1);
+ ICacheKey<String> key = getICacheKey(UUID.randomUUID().toString());
+ String value = UUID.randomUUID().toString();
+ AtomicInteger exceptionCount = new AtomicInteger();
+
+ Thread[] threads = new Thread[numberOfSameKeys];
+ Phaser phaser = new Phaser(numberOfSameKeys + 1);
+ CountDownLatch countDownLatch = new CountDownLatch(numberOfSameKeys); // To wait for all threads to finish.
+
+ for (int i = 0; i < numberOfSameKeys; i++) {
+ threads[i] = new Thread(() -> {
+ try {
+ phaser.arriveAndAwaitAdvance();
+ tieredSpilloverCache.computeIfAbsent(key, loader);
+ } catch (Exception e) {
+ exceptionCount.incrementAndGet();
+ assertEquals(ExecutionException.class, e.getClass());
+ assertEquals(expectedException, e.getCause().getClass());
+ assertEquals(expectedExceptionMessage, e.getCause().getMessage());
+ } finally {
+ countDownLatch.countDown();
+ }
+ });
+ threads[i].start();
+ }
+ phaser.arriveAndAwaitAdvance();
+ countDownLatch.await(); // Wait for all threads to finish.
+
+ // Verify exception count was equal to number of requests
+ assertEquals(numberOfSameKeys, exceptionCount.get());
+ assertEquals(0, tieredSpilloverCache.completableFutureMap.size());
+ }
+
private ImmutableCacheStats getTotalStatsSnapshot(TieredSpilloverCache<?, ?> tsc) throws IOException {
ImmutableCacheStatsHolder cacheStats = tsc.stats(new String[0]);
return cacheStats.getStatsForDimensionValues(List.of());
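The assertions added above check that completableFutureMap drains back to zero once computeIfAbsent settles, whether the load succeeds, races with concurrent callers, or throws. As a minimal, self-contained sketch of that per-key future pattern (the class and method names below are hypothetical; this is not the actual TieredSpilloverCache implementation):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;

final class DedupingLoader<K, V> {
    // Plays the role of the completableFutureMap asserted on in the tests above.
    final Map<K, CompletableFuture<V>> inFlight = new ConcurrentHashMap<>();

    V computeIfAbsent(K key, Function<K, V> loader) throws ExecutionException, InterruptedException {
        CompletableFuture<V> created = new CompletableFuture<>();
        CompletableFuture<V> existing = inFlight.putIfAbsent(key, created);
        if (existing != null) {
            return existing.get(); // another caller is already loading this key; wait for its result
        }
        try {
            V value = loader.apply(key);
            created.complete(value);
            return value;
        } catch (RuntimeException e) {
            created.completeExceptionally(e); // waiters observe the failure as an ExecutionException
            throw e;
        } finally {
            inFlight.remove(key); // the map drains back to size 0 once the load settles
        }
    }
}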
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java
index c968fb2f6c2da..c84892971c87e 100644
--- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CommunityIdProcessor.java
@@ -29,7 +29,7 @@
* Processor that generating community id flow hash for the network flow tuples, the algorithm is defined in
* Community ID Flow Hashing.
*/
-public class CommunityIdProcessor extends AbstractProcessor {
+public final class CommunityIdProcessor extends AbstractProcessor {
public static final String TYPE = "community_id";
// the version of the community id flow hashing algorithm
private static final String COMMUNITY_ID_HASH_VERSION = "1";
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java
new file mode 100644
index 0000000000000..c2f59bf586c81
--- /dev/null
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/FingerprintProcessor.java
@@ -0,0 +1,279 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.common.hash.MessageDigests;
+import org.opensearch.core.common.Strings;
+import org.opensearch.ingest.AbstractProcessor;
+import org.opensearch.ingest.ConfigurationUtils;
+import org.opensearch.ingest.IngestDocument;
+import org.opensearch.ingest.Processor;
+
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException;
+
+/**
+ * Processor that generates a hash value for the specified fields, or for all fields not in the specified exclude list
+ */
+public final class FingerprintProcessor extends AbstractProcessor {
+ public static final String TYPE = "fingerprint";
+ // This processor was introduced in 2.16.0. We append the OpenSearch version to the hash method name to ensure
+ // that this processor always generates the same hash value for a given hash method; if the processing logic
+ // of this processor changes in a future version, the version number in the hash method should be increased accordingly.
+ private static final Set<String> HASH_METHODS = Set.of("MD5@2.16.0", "SHA-1@2.16.0", "SHA-256@2.16.0", "SHA3-256@2.16.0");
+
+ // fields used to generate hash value
+ private final List<String> fields;
+ // all fields other than the excluded fields are used to generate hash value
+ private final List<String> excludeFields;
+ // the target field to store the hash value, defaults to fingerprint
+ private final String targetField;
+ // hash method used to generate the hash value, defaults to SHA-1
+ private final String hashMethod;
+ private final boolean ignoreMissing;
+
+ FingerprintProcessor(
+ String tag,
+ String description,
+ @Nullable List<String> fields,
+ @Nullable List<String> excludeFields,
+ String targetField,
+ String hashMethod,
+ boolean ignoreMissing
+ ) {
+ super(tag, description);
+ if (fields != null && !fields.isEmpty()) {
+ if (fields.stream().anyMatch(Strings::isNullOrEmpty)) {
+ throw new IllegalArgumentException("field name in [fields] cannot be null nor empty");
+ }
+ if (excludeFields != null && !excludeFields.isEmpty()) {
+ throw new IllegalArgumentException("either fields or exclude_fields can be set");
+ }
+ }
+ if (excludeFields != null && !excludeFields.isEmpty() && excludeFields.stream().anyMatch(Strings::isNullOrEmpty)) {
+ throw new IllegalArgumentException("field name in [exclude_fields] cannot be null nor empty");
+ }
+
+ if (!HASH_METHODS.contains(hashMethod.toUpperCase(Locale.ROOT))) {
+ throw new IllegalArgumentException("hash method must be MD5@2.16.0, SHA-1@2.16.0 or SHA-256@2.16.0 or SHA3-256@2.16.0");
+ }
+ this.fields = fields;
+ this.excludeFields = excludeFields;
+ this.targetField = targetField;
+ this.hashMethod = hashMethod;
+ this.ignoreMissing = ignoreMissing;
+ }
+
+ public List<String> getFields() {
+ return fields;
+ }
+
+ public List<String> getExcludeFields() {
+ return excludeFields;
+ }
+
+ public String getTargetField() {
+ return targetField;
+ }
+
+ public String getHashMethod() {
+ return hashMethod;
+ }
+
+ public boolean isIgnoreMissing() {
+ return ignoreMissing;
+ }
+
+ @Override
+ public IngestDocument execute(IngestDocument document) {
+ // we should deduplicate and sort the field names to make sure we can get consistent hash value
+ final List<String> sortedFields;
+ Set<String> existingFields = new HashSet<>(document.getSourceAndMetadata().keySet());
+ Set<String> metadataFields = document.getMetadata()
+ .keySet()
+ .stream()
+ .map(IngestDocument.Metadata::getFieldName)
+ .collect(Collectors.toSet());
+ // metadata fields such as _index, _id and _routing are ignored
+ if (fields != null && !fields.isEmpty()) {
+ sortedFields = fields.stream()
+ .distinct()
+ .filter(field -> !metadataFields.contains(field))
+ .sorted()
+ .collect(Collectors.toList());
+ } else if (excludeFields != null && !excludeFields.isEmpty()) {
+ sortedFields = existingFields.stream()
+ .filter(field -> !metadataFields.contains(field) && !excludeFields.contains(field))
+ .sorted()
+ .collect(Collectors.toList());
+ } else {
+ sortedFields = existingFields.stream().filter(field -> !metadataFields.contains(field)).sorted().collect(Collectors.toList());
+ }
+ assert (!sortedFields.isEmpty());
+
+ final StringBuilder concatenatedFields = new StringBuilder();
+ sortedFields.forEach(field -> {
+ if (!document.hasField(field)) {
+ if (ignoreMissing) {
+ return;
+ } else {
+ throw new IllegalArgumentException("field [" + field + "] doesn't exist");
+ }
+ }
+
+ final Object value = document.getFieldValue(field, Object.class);
+ if (value instanceof Map) {
+ @SuppressWarnings("unchecked")
+ Map<String, Object> flattenedMap = toFlattenedMap((Map<String, Object>) value);
+ flattenedMap.entrySet().stream().sorted(Map.Entry.comparingByKey()).forEach(entry -> {
+ String fieldValue = String.valueOf(entry.getValue());
+ concatenatedFields.append("|")
+ .append(field)
+ .append(".")
+ .append(entry.getKey())
+ .append("|")
+ .append(fieldValue.length())
+ .append(":")
+ .append(fieldValue);
+ });
+ } else {
+ String fieldValue = String.valueOf(value);
+ concatenatedFields.append("|").append(field).append("|").append(fieldValue.length()).append(":").append(fieldValue);
+ }
+ });
+ // if all specified fields don't exist and ignore_missing is true, then do nothing
+ if (concatenatedFields.length() == 0) {
+ return document;
+ }
+ concatenatedFields.append("|");
+
+ MessageDigest messageDigest = HashMethod.fromMethodName(hashMethod);
+ assert (messageDigest != null);
+ messageDigest.update(concatenatedFields.toString().getBytes(StandardCharsets.UTF_8));
+ document.setFieldValue(targetField, hashMethod + ":" + Base64.getEncoder().encodeToString(messageDigest.digest()));
+
+ return document;
+ }
+
+ @Override
+ public String getType() {
+ return TYPE;
+ }
+
+ /**
+ * Convert a map containing nested fields to a flattened map,
+ * for example, if the original map is
+ * {
+ * "a": {
+ * "b": 1,
+ * "c": 2
+ * }
+ * }, then the converted map is
+ * {
+ * "a.b": 1,
+ * "a.c": 2
+ * }
+ * @param map the original map which may contain nested fields
+ * @return a flattened map which has only one level of fields
+ */
+ @SuppressWarnings("unchecked")
+ private Map<String, Object> toFlattenedMap(Map<String, Object> map) {
+ Map<String, Object> flattenedMap = new HashMap<>();
+ for (Map.Entry<String, Object> entry : map.entrySet()) {
+ if (entry.getValue() instanceof Map) {
+ toFlattenedMap((Map<String, Object>) entry.getValue()).forEach(
+ (key, value) -> flattenedMap.put(entry.getKey() + "." + key, value)
+ );
+ } else {
+ flattenedMap.put(entry.getKey(), entry.getValue());
+ }
+ }
+ return flattenedMap;
+ }
+
+ /**
+ * The supported hash methods used to generate hash value
+ */
+ enum HashMethod {
+ MD5(MessageDigests.md5()),
+ SHA1(MessageDigests.sha1()),
+ SHA256(MessageDigests.sha256()),
+ SHA3256(MessageDigests.sha3256());
+
+ private final MessageDigest messageDigest;
+
+ HashMethod(MessageDigest messageDigest) {
+ this.messageDigest = messageDigest;
+ }
+
+ public static MessageDigest fromMethodName(String methodName) {
+ String name = methodName.toUpperCase(Locale.ROOT);
+ switch (name) {
+ case "MD5@2.16.0":
+ return MD5.messageDigest;
+ case "SHA-1@2.16.0":
+ return SHA1.messageDigest;
+ case "SHA-256@2.16.0":
+ return SHA256.messageDigest;
+ case "SHA3-256@2.16.0":
+ return SHA3256.messageDigest;
+ default:
+ return null;
+ }
+ }
+ }
+
+ public static final class Factory implements Processor.Factory {
+ @Override
+ public FingerprintProcessor create(
+ Map<String, Processor.Factory> registry,
+ String processorTag,
+ String description,
+ Map<String, Object> config
+ ) throws Exception {
+ List<String> fields = ConfigurationUtils.readOptionalList(TYPE, processorTag, config, "fields");
+ List<String> excludeFields = ConfigurationUtils.readOptionalList(TYPE, processorTag, config, "exclude_fields");
+ if (fields != null && !fields.isEmpty()) {
+ if (fields.stream().anyMatch(Strings::isNullOrEmpty)) {
+ throw newConfigurationException(TYPE, processorTag, "fields", "field name cannot be null nor empty");
+ }
+ if (excludeFields != null && !excludeFields.isEmpty()) {
+ throw newConfigurationException(TYPE, processorTag, "fields", "either fields or exclude_fields can be set");
+ }
+ }
+ if (excludeFields != null && !excludeFields.isEmpty() && excludeFields.stream().anyMatch(Strings::isNullOrEmpty)) {
+ throw newConfigurationException(TYPE, processorTag, "exclude_fields", "field name cannot be null nor empty");
+ }
+
+ String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field", "fingerprint");
+ String hashMethod = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "hash_method", "SHA-1@2.16.0");
+ if (!HASH_METHODS.contains(hashMethod.toUpperCase(Locale.ROOT))) {
+ throw newConfigurationException(
+ TYPE,
+ processorTag,
+ "hash_method",
+ "hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0"
+ );
+ }
+ boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
+ return new FingerprintProcessor(processorTag, description, fields, excludeFields, targetField, hashMethod, ignoreMissing);
+ }
+ }
+}
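To make the generated fingerprint concrete, here is a minimal standalone sketch of the concatenation and hashing shape used by execute() above, assuming a document with a nested field a.b = 1 and a top-level field foo = "bar"; the class name and sample values are illustrative only, not part of the change:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;

public class FingerprintSketch {
    public static void main(String[] args) throws Exception {
        // Fields are sorted, nested maps are flattened to dotted keys, and each entry is appended as
        // |<field>|<valueLength>:<value>, with a trailing "|" before hashing.
        String concatenated = "|a.b|1:1|foo|3:bar|";
        MessageDigest digest = MessageDigest.getInstance("SHA-1");
        digest.update(concatenated.getBytes(StandardCharsets.UTF_8));
        // The stored value is "<hash_method>:<base64 of digest>", e.g. "SHA-1@2.16.0:...".
        System.out.println("SHA-1@2.16.0:" + Base64.getEncoder().encodeToString(digest.digest()));
    }
}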
diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
index 0f8b248fd5af8..5b2db9ff940e7 100644
--- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
+++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java
@@ -58,10 +58,20 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
import java.util.function.Supplier;
+import java.util.stream.Collectors;
public class IngestCommonModulePlugin extends Plugin implements ActionPlugin, IngestPlugin {
+ static final Setting<List<String>> PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting(
+ "ingest.common.processors.allowed",
+ List.of(),
+ Function.identity(),
+ Setting.Property.NodeScope
+ );
+
static final Setting<TimeValue> WATCHDOG_INTERVAL = Setting.timeSetting(
"ingest.grok.watchdog.interval",
TimeValue.timeValueSeconds(1),
@@ -77,7 +87,7 @@ public IngestCommonModulePlugin() {}
@Override
public Map<String, Processor.Factory> getProcessors(Processor.Parameters parameters) {
- Map<String, Processor.Factory> processors = new HashMap<>();
+ final Map<String, Processor.Factory> processors = new HashMap<>();
processors.put(DateProcessor.TYPE, new DateProcessor.Factory(parameters.scriptService));
processors.put(SetProcessor.TYPE, new SetProcessor.Factory(parameters.scriptService));
processors.put(AppendProcessor.TYPE, new AppendProcessor.Factory(parameters.scriptService));
@@ -109,7 +119,8 @@ public Map getProcessors(Processor.Parameters paramet
processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService));
processors.put(RemoveByPatternProcessor.TYPE, new RemoveByPatternProcessor.Factory());
processors.put(CommunityIdProcessor.TYPE, new CommunityIdProcessor.Factory());
- return Collections.unmodifiableMap(processors);
+ processors.put(FingerprintProcessor.TYPE, new FingerprintProcessor.Factory());
+ return filterForAllowlistSetting(parameters.env.settings(), processors);
}
@Override
@@ -132,7 +143,7 @@ public List getRestHandlers(
@Override
public List<Setting<?>> getSettings() {
- return Arrays.asList(WATCHDOG_INTERVAL, WATCHDOG_MAX_EXECUTION_TIME);
+ return Arrays.asList(WATCHDOG_INTERVAL, WATCHDOG_MAX_EXECUTION_TIME, PROCESSORS_ALLOWLIST_SETTING);
}
private static MatcherWatchdog createGrokThreadWatchdog(Processor.Parameters parameters) {
@@ -146,4 +157,27 @@ private static MatcherWatchdog createGrokThreadWatchdog(Processor.Parameters par
);
}
+ private Map<String, Processor.Factory> filterForAllowlistSetting(Settings settings, Map<String, Processor.Factory> map) {
+ if (PROCESSORS_ALLOWLIST_SETTING.exists(settings) == false) {
+ return Map.copyOf(map);
+ }
+ final Set<String> allowlist = Set.copyOf(PROCESSORS_ALLOWLIST_SETTING.get(settings));
+ // Assert that no unknown processors are defined in the allowlist
+ final Set<String> unknownAllowlistProcessors = allowlist.stream()
+ .filter(p -> map.containsKey(p) == false)
+ .collect(Collectors.toUnmodifiableSet());
+ if (unknownAllowlistProcessors.isEmpty() == false) {
+ throw new IllegalArgumentException(
+ "Processor(s) "
+ + unknownAllowlistProcessors
+ + " were defined in ["
+ + PROCESSORS_ALLOWLIST_SETTING.getKey()
+ + "] but do not exist"
+ );
+ }
+ return map.entrySet()
+ .stream()
+ .filter(e -> allowlist.contains(e.getKey()))
+ .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
+ }
}
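A short sketch of how the new ingest.common.processors.allowed node setting could be supplied programmatically, mirroring what the plugin test added below does; the class name and processor names here are examples, not part of the change:

import org.opensearch.common.settings.Settings;

import java.util.List;

public class IngestAllowlistSketch {
    public static void main(String[] args) {
        // Equivalent to putting the following in opensearch.yml:
        //   ingest.common.processors.allowed: ["set", "date", "fingerprint"]
        // Any listed name that does not match a registered processor makes getProcessors throw.
        Settings settings = Settings.builder()
            .putList("ingest.common.processors.allowed", List.of("set", "date", "fingerprint"))
            .build();
        System.out.println(settings.getAsList("ingest.common.processors.allowed"));
    }
}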
diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java
new file mode 100644
index 0000000000000..74ad4cade7b37
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorFactoryTests.java
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.OpenSearchParseException;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class FingerprintProcessorFactoryTests extends OpenSearchTestCase {
+
+ private FingerprintProcessor.Factory factory;
+
+ @Before
+ public void init() {
+ factory = new FingerprintProcessor.Factory();
+ }
+
+ public void testCreate() throws Exception {
+ Map<String, Object> config = new HashMap<>();
+
+ List<String> fields = null;
+ List<String> excludeFields = null;
+ if (randomBoolean()) {
+ fields = List.of(randomAlphaOfLength(10));
+ config.put("fields", fields);
+ } else {
+ excludeFields = List.of(randomAlphaOfLength(10));
+ config.put("exclude_fields", excludeFields);
+ }
+
+ String targetField = null;
+ if (randomBoolean()) {
+ targetField = randomAlphaOfLength(10);
+ }
+ config.put("target_field", targetField);
+
+ boolean ignoreMissing = randomBoolean();
+ config.put("ignore_missing", ignoreMissing);
+ String processorTag = randomAlphaOfLength(10);
+ FingerprintProcessor fingerprintProcessor = factory.create(null, processorTag, null, config);
+ assertThat(fingerprintProcessor.getTag(), equalTo(processorTag));
+ assertThat(fingerprintProcessor.getFields(), equalTo(fields));
+ assertThat(fingerprintProcessor.getExcludeFields(), equalTo(excludeFields));
+ assertThat(fingerprintProcessor.getTargetField(), equalTo(Objects.requireNonNullElse(targetField, "fingerprint")));
+ assertThat(fingerprintProcessor.isIgnoreMissing(), equalTo(ignoreMissing));
+ }
+
+ public void testCreateWithFields() throws Exception {
+ Map<String, Object> config = new HashMap<>();
+ config.put("fields", List.of(randomAlphaOfLength(10)));
+ config.put("exclude_fields", List.of(randomAlphaOfLength(10)));
+ try {
+ factory.create(null, null, null, config);
+ fail("factory create should have failed");
+ } catch (OpenSearchParseException e) {
+ assertThat(e.getMessage(), equalTo("[fields] either fields or exclude_fields can be set"));
+ }
+
+ config = new HashMap<>();
+ List<String> fields = new ArrayList<>();
+ if (randomBoolean()) {
+ fields.add(null);
+ } else {
+ fields.add("");
+ }
+ config.put("fields", fields);
+ try {
+ factory.create(null, null, null, config);
+ fail("factory create should have failed");
+ } catch (OpenSearchParseException e) {
+ assertThat(e.getMessage(), equalTo("[fields] field name cannot be null nor empty"));
+ }
+
+ config = new HashMap<>();
+ List<String> excludeFields = new ArrayList<>();
+ if (randomBoolean()) {
+ excludeFields.add(null);
+ } else {
+ excludeFields.add("");
+ }
+ config.put("exclude_fields", excludeFields);
+ try {
+ factory.create(null, null, null, config);
+ fail("factory create should have failed");
+ } catch (OpenSearchParseException e) {
+ assertThat(e.getMessage(), equalTo("[exclude_fields] field name cannot be null nor empty"));
+ }
+ }
+
+ public void testCreateWithHashMethod() throws Exception {
+ Map<String, Object> config = new HashMap<>();
+ List<String> fields = List.of(randomAlphaOfLength(10));
+ config.put("fields", fields);
+ config.put("hash_method", randomAlphaOfLength(10));
+ try {
+ factory.create(null, null, null, config);
+ fail("factory create should have failed");
+ } catch (OpenSearchParseException e) {
+ assertThat(
+ e.getMessage(),
+ equalTo("[hash_method] hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0")
+ );
+ }
+ }
+}
diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java
new file mode 100644
index 0000000000000..67a82f28fb763
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/FingerprintProcessorTests.java
@@ -0,0 +1,176 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.ingest.IngestDocument;
+import org.opensearch.ingest.Processor;
+import org.opensearch.ingest.RandomDocumentPicks;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class FingerprintProcessorTests extends OpenSearchTestCase {
+ private final List<String> hashMethods = List.of("MD5@2.16.0", "SHA-1@2.16.0", "SHA-256@2.16.0", "SHA3-256@2.16.0");
+
+ public void testGenerateFingerprint() throws Exception {
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
+ List<String> fields = null;
+ List<String> excludeFields = null;
+ if (randomBoolean()) {
+ fields = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(1, 10); i++) {
+ fields.add(RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10)));
+ }
+ } else {
+ excludeFields = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(1, 10); i++) {
+ excludeFields.add(RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10)));
+ }
+ }
+
+ String targetField = "fingerprint";
+ if (randomBoolean()) {
+ targetField = randomAlphaOfLength(10);
+ }
+
+ String hashMethod = randomFrom(hashMethods);
+ Processor processor = createFingerprintProcessor(fields, excludeFields, targetField, hashMethod, false);
+ processor.execute(ingestDocument);
+ assertThat(ingestDocument.hasField(targetField), equalTo(true));
+ }
+
+ public void testCreateFingerprintProcessorFailed() {
+ List<String> fields = new ArrayList<>();
+ if (randomBoolean()) {
+ fields.add(null);
+ } else {
+ fields.add("");
+ }
+ fields.add(randomAlphaOfLength(10));
+
+ assertThrows(
+ "field name in [fields] cannot be null nor empty",
+ IllegalArgumentException.class,
+ () -> createFingerprintProcessor(fields, null, null, randomFrom(hashMethods), false)
+ );
+
+ List<String> excludeFields = new ArrayList<>();
+ if (randomBoolean()) {
+ excludeFields.add(null);
+ } else {
+ excludeFields.add("");
+ }
+ excludeFields.add(randomAlphaOfLength(10));
+
+ assertThrows(
+ "field name in [exclude_fields] cannot be null nor empty",
+ IllegalArgumentException.class,
+ () -> createFingerprintProcessor(null, excludeFields, null, randomFrom(hashMethods), false)
+ );
+
+ assertThrows(
+ "either fields or exclude_fields can be set",
+ IllegalArgumentException.class,
+ () -> createFingerprintProcessor(
+ List.of(randomAlphaOfLength(10)),
+ List.of(randomAlphaOfLength(10)),
+ null,
+ randomFrom(hashMethods),
+ false
+ )
+ );
+
+ assertThrows(
+ "hash method must be MD5@2.16.0, SHA-1@2.16.0, SHA-256@2.16.0 or SHA3-256@2.16.0",
+ IllegalArgumentException.class,
+ () -> createFingerprintProcessor(Collections.emptyList(), null, "fingerprint", randomAlphaOfLength(10), false)
+ );
+ }
+
+ public void testEmptyFieldAndExcludeFields() throws Exception {
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
+ List<String> fields = null;
+ List<String> excludeFields = null;
+ if (randomBoolean()) {
+ fields = new ArrayList<>();
+ } else {
+ excludeFields = new ArrayList<>();
+ }
+ String targetField = "fingerprint";
+ if (randomBoolean()) {
+ targetField = randomAlphaOfLength(10);
+ }
+
+ String hashMethod = randomFrom(hashMethods);
+ Processor processor = createFingerprintProcessor(fields, excludeFields, targetField, hashMethod, false);
+ processor.execute(ingestDocument);
+ assertThat(ingestDocument.hasField(targetField), equalTo(true));
+ }
+
+ public void testIgnoreMissing() throws Exception {
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
+ String nonExistingFieldName = RandomDocumentPicks.randomNonExistingFieldName(random(), ingestDocument);
+ List<String> nonExistingFields = List.of(nonExistingFieldName);
+ Processor processor = createFingerprintProcessor(nonExistingFields, null, "fingerprint", randomFrom(hashMethods), false);
+ assertThrows(
+ "field [" + nonExistingFieldName + "] doesn't exist",
+ IllegalArgumentException.class,
+ () -> processor.execute(ingestDocument)
+ );
+
+ String targetField = "fingerprint";
+ Processor processorWithIgnoreMissing = createFingerprintProcessor(
+ nonExistingFields,
+ null,
+ "fingerprint",
+ randomFrom(hashMethods),
+ true
+ );
+ processorWithIgnoreMissing.execute(ingestDocument);
+ assertThat(ingestDocument.hasField(targetField), equalTo(false));
+ }
+
+ public void testIgnoreMetadataFields() throws Exception {
+ IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
+ List<String> metadataFields = ingestDocument.getMetadata()
+ .keySet()
+ .stream()
+ .map(IngestDocument.Metadata::getFieldName)
+ .collect(Collectors.toList());
+
+ String existingFieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, randomAlphaOfLength(10));
+ List<String> fields = List.of(existingFieldName, metadataFields.get(randomIntBetween(0, metadataFields.size() - 1)));
+
+ String targetField = "fingerprint";
+ String algorithm = randomFrom(hashMethods);
+ Processor processor = createFingerprintProcessor(fields, null, targetField, algorithm, false);
+
+ processor.execute(ingestDocument);
+ String fingerprint = ingestDocument.getFieldValue(targetField, String.class);
+
+ processor = createFingerprintProcessor(List.of(existingFieldName), null, targetField, algorithm, false);
+ processor.execute(ingestDocument);
+ assertThat(ingestDocument.getFieldValue(targetField, String.class), equalTo(fingerprint));
+ }
+
+ private FingerprintProcessor createFingerprintProcessor(
+ List<String> fields,
+ List<String> excludeFields,
+ String targetField,
+ String hashMethod,
+ boolean ignoreMissing
+ ) {
+ return new FingerprintProcessor(randomAlphaOfLength(10), null, fields, excludeFields, targetField, hashMethod, ignoreMissing);
+ }
+}
diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/IngestCommonModulePluginTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/IngestCommonModulePluginTests.java
new file mode 100644
index 0000000000000..b0c1e0fdbaa63
--- /dev/null
+++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/IngestCommonModulePluginTests.java
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.ingest.common;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.env.TestEnvironment;
+import org.opensearch.ingest.Processor;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+
+public class IngestCommonModulePluginTests extends OpenSearchTestCase {
+
+ public void testAllowlist() throws IOException {
+ runAllowlistTest(List.of());
+ runAllowlistTest(List.of("date"));
+ runAllowlistTest(List.of("set"));
+ runAllowlistTest(List.of("copy", "date"));
+ runAllowlistTest(List.of("date", "set", "copy"));
+ }
+
+ private void runAllowlistTest(List<String> allowlist) throws IOException {
+ final Settings settings = Settings.builder()
+ .putList(IngestCommonModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowlist)
+ .build();
+ try (IngestCommonModulePlugin plugin = new IngestCommonModulePlugin()) {
+ assertEquals(Set.copyOf(allowlist), plugin.getProcessors(createParameters(settings)).keySet());
+ }
+ }
+
+ public void testAllowlistNotSpecified() throws IOException {
+ final Settings.Builder builder = Settings.builder();
+ builder.remove(IngestCommonModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey());
+ final Settings settings = builder.build();
+ try (IngestCommonModulePlugin plugin = new IngestCommonModulePlugin()) {
+ final Set<String> expected = Set.of(
+ "append",
+ "urldecode",
+ "sort",
+ "fail",
+ "trim",
+ "set",
+ "fingerprint",
+ "pipeline",
+ "json",
+ "join",
+ "kv",
+ "bytes",
+ "date",
+ "drop",
+ "community_id",
+ "lowercase",
+ "convert",
+ "copy",
+ "gsub",
+ "dot_expander",
+ "rename",
+ "remove_by_pattern",
+ "html_strip",
+ "remove",
+ "csv",
+ "grok",
+ "date_index_name",
+ "foreach",
+ "script",
+ "dissect",
+ "uppercase",
+ "split"
+ );
+ assertEquals(expected, plugin.getProcessors(createParameters(settings)).keySet());
+ }
+ }
+
+ public void testAllowlistHasNonexistentProcessors() throws IOException {
+ final Settings settings = Settings.builder()
+ .putList(IngestCommonModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), List.of("threeve"))
+ .build();
+ try (IngestCommonModulePlugin plugin = new IngestCommonModulePlugin()) {
+ IllegalArgumentException e = expectThrows(
+ IllegalArgumentException.class,
+ () -> plugin.getProcessors(createParameters(settings))
+ );
+ assertTrue(e.getMessage(), e.getMessage().contains("threeve"));
+ }
+ }
+
+ private static Processor.Parameters createParameters(Settings settings) {
+ return new Processor.Parameters(
+ TestEnvironment.newEnvironment(Settings.builder().put(settings).put("path.home", "").build()),
+ null,
+ null,
+ null,
+ () -> 0L,
+ (a, b) -> null,
+ null,
+ null,
+ $ -> {},
+ null
+ );
+ }
+}
diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml
index 2a816f0386667..9bf4faf53a999 100644
--- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml
+++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml
@@ -86,3 +86,19 @@
- do:
nodes.info: {}
- contains: { nodes.$cluster_manager.ingest.processors: { type: community_id } }
+
+---
+"Fingerprint processor exists":
+ - skip:
+ version: " - 2.15.99"
+ features: contains
+ reason: "fingerprint processor was introduced in 2.16.0 and contains is a newly added assertion"
+ - do:
+ cluster.state: {}
+
+ # Get cluster-manager node id
+ - set: { cluster_manager_node: cluster_manager }
+
+ - do:
+ nodes.info: {}
+ - contains: { nodes.$cluster_manager.ingest.processors: { type: fingerprint } }
diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml
index a66f02d6b6a6d..984c67d39757d 100644
--- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml
+++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml
@@ -278,3 +278,78 @@ teardown:
body: {source_field: "fooBar", foo: {foo: "bar"}}
- match: { error.root_cause.0.type: "illegal_argument_exception" }
- match: { error.root_cause.0.reason: "Iterable object is self-referencing itself (ingest script)" }
+
+---
+"Test painless data types":
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "description": "_description",
+ "processors": [
+ {
+ "script" : {
+ "source" : "ctx.byte = (byte)127;ctx.short = (short)32767;ctx.int = (int)2147483647;ctx.long = (long)9223372036854775807L;ctx.float = (float)0.1;ctx.double = (double)0.1;ctx.boolean = (boolean)true"
+ }
+ },
+ {
+ "script" : {
+ "source" : "ctx.other_field = 'other_field'"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "my_pipeline"
+ body: {source_field: "FooBar"}
+
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.byte: 127 }
+ - match: { _source.int: 2147483647 }
+ - match: { _source.long: 9223372036854775807 }
+ - gt: { _source.float: 0.0 }
+ - lt: { _source.float: 0.2 }
+ - gt: { _source.double: 0.0 }
+ - lt: { _source.double: 0.2 }
+ - match: { _source.boolean: true }
+
+---
+"Test char type fails":
+ - do:
+ ingest.put_pipeline:
+ id: "my_pipeline"
+ body: >
+ {
+ "description": "_description",
+ "processors": [
+ {
+ "script" : {
+ "source" : "ctx.char = (char)'a'"
+ }
+ },
+ {
+ "script" : {
+ "source" : "ctx.other_field = 'other_field'"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ catch: bad_request
+ index:
+ index: test
+ id: 1
+ pipeline: "my_pipeline"
+ body: {source_field: "FooBar"}
+ - match: { error.root_cause.0.type: "illegal_argument_exception" }
diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml
new file mode 100644
index 0000000000000..04568916239f4
--- /dev/null
+++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/340_fingerprint_processor.yml
@@ -0,0 +1,786 @@
+---
+teardown:
+ - do:
+ ingest.delete_pipeline:
+ id: "1"
+ ignore: 404
+
+---
+"Test creat fingerprint processor":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ catch: /field name cannot be null nor empty/
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields": [null]
+ }
+ }
+ ]
+ }
+ - do:
+ catch: /field name cannot be null nor empty/
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "exclude_fields": [""]
+ }
+ }
+ ]
+ }
+ - do:
+ catch: /either fields or exclude\_fields can be set/
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields": ["foo"],
+ "exclude_fields": ["bar"]
+ }
+ }
+ ]
+ }
+
+ - do:
+ catch: /hash method must be MD5@2.16.0\, SHA\-1@2.16.0, SHA\-256@2.16.0 or SHA3\-256@2.16.0/
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields": ["foo"],
+ "hash_method": "non-existing"
+ }
+ }
+ ]
+ }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo"],
+ "target_field" : "fingerprint_field",
+ "hash_method": "SHA-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+---
+"Test fingerprint processor with ignore_missing":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo"]
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ catch: /field \[foo\] doesn't exist/
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ bar: "bar"
+ }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo", "bar"],
+ "ignore_missing" : true
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: "foo"
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.fingerprint: "SHA-1@2.16.0:YqpBTuHXCPV04j/7lGfWeUl8Tyo=" }
+
+---
+"Test fingerprint processor with custom target field":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo"],
+ "target_field" : "target"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: "foo"
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.target: "SHA-1@2.16.0:YqpBTuHXCPV04j/7lGfWeUl8Tyo=" }
+
+---
+"Test fingerprint processor with non-primitive fields and SHA-1":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo", "bar", "zoo"]
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 2
+ - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields":[]
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 3
+ - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "exclude_fields":[]
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 4
+ - match: { _source.fingerprint: "SHA-1@2.16.0:KYJ4pc4ouFmAbgZGp7CfNoykZeo=" }
+
+---
+"Test fingerprint processor with non-primitive fields and MD5":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo", "bar", "zoo"],
+ "hash_method" : "MD5@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "hash_method" : "MD5@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 2
+ - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields":[],
+ "hash_method" : "MD5@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 3
+ - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "exclude_fields":[],
+ "hash_method" : "MD5@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 4
+ - match: { _source.fingerprint: "MD5@2.16.0:NovpcJ+MYHzEZtCewcDPTQ==" }
+
+
+---
+"Test fingerprint processor with non-primitive fields and SHA-256":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo", "bar", "zoo"],
+ "hash_method" : "SHA-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "hash_method" : "SHA-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 2
+ - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields":[],
+ "hash_method" : "SHA-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 3
+ - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "exclude_fields":[],
+ "hash_method" : "SHA-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 4
+ - match: { _source.fingerprint: "SHA-256@2.16.0:Sdlg0BodM3n1my4BvaTfJCPrvHxfrxno0kCLfMaC+XY=" }
+
+---
+"Test fingerprint processor with non-primitive fields and SHA3-256":
+ - skip:
+ version: " - 2.15.99"
+ reason: "introduced in 2.16.0"
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields" : ["foo", "bar", "zoo"],
+ "hash_method" : "SHA3-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 1
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 1
+ - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "hash_method" : "SHA3-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 2
+ - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "fields":[],
+ "hash_method" : "SHA3-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 3
+ - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" }
+
+ - do:
+ ingest.put_pipeline:
+ id: "1"
+ body: >
+ {
+ "processors": [
+ {
+ "fingerprint" : {
+ "exclude_fields":[],
+ "hash_method" : "SHA3-256@2.16.0"
+ }
+ }
+ ]
+ }
+ - match: { acknowledged: true }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ pipeline: "1"
+ body: {
+ foo: [1, 2, 3],
+ bar: {
+ field: {
+ innerField: "inner"
+ }
+ },
+ zoo: null
+ }
+ - do:
+ get:
+ index: test
+ id: 4
+ - match: { _source.fingerprint: "SHA3-256@2.16.0:+GZCkMLEMkUA/4IrEZEZZYsVMbZdpJ92ppN3wUsFYOI=" }
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1
deleted file mode 100644
index 29aade3ad4298..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e21d20edee0712472e7c6f605c9d97aeecf16c0
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..83dd8e657bdd5
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+b866103bbaca4141c152deca9252bd137026dafc
\ No newline at end of file
diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle
index bcf5c07ea8c64..a836124f94b41 100644
--- a/modules/lang-mustache/build.gradle
+++ b/modules/lang-mustache/build.gradle
@@ -38,7 +38,7 @@ opensearchplugin {
}
dependencies {
- api "com.github.spullara.mustache.java:compiler:0.9.13"
+ api "com.github.spullara.mustache.java:compiler:0.9.14"
}
restResources {
diff --git a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1
deleted file mode 100644
index 70d53aac260eb..0000000000000
--- a/modules/lang-mustache/licenses/compiler-0.9.13.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-60666500a7dce7a5d3e17c09b46ea6f037192bd5
\ No newline at end of file
diff --git a/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1 b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1
new file mode 100644
index 0000000000000..29069ac90817a
--- /dev/null
+++ b/modules/lang-mustache/licenses/compiler-0.9.14.jar.sha1
@@ -0,0 +1 @@
+e6df8b5aabb80d6eb6d8fef312a56d66b7659ba6
\ No newline at end of file
diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java
index 400d867296e5f..3115dce6c10a5 100644
--- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java
@@ -35,6 +35,7 @@
import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
@@ -165,7 +166,7 @@ public ScaledFloatFieldMapper build(BuilderContext context) {
public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.getSettings()));
- public static final class ScaledFloatFieldType extends SimpleMappedFieldType {
+ public static final class ScaledFloatFieldType extends SimpleMappedFieldType implements NumericPointEncoder {
private final double scalingFactor;
private final Double nullValue;
@@ -188,6 +189,21 @@ public ScaledFloatFieldType(String name, double scalingFactor) {
this(name, true, false, true, Collections.emptyMap(), scalingFactor, null);
}
+ @Override
+ public byte[] encodePoint(Number value) {
+ assert value instanceof Double;
+ double doubleValue = (Double) value;
+ byte[] point = new byte[Long.BYTES];
+ if (doubleValue == Double.POSITIVE_INFINITY) {
+ LongPoint.encodeDimension(Long.MAX_VALUE, point, 0);
+ } else if (doubleValue == Double.NEGATIVE_INFINITY) {
+ LongPoint.encodeDimension(Long.MIN_VALUE, point, 0);
+ } else {
+ LongPoint.encodeDimension(Math.round(scale(value)), point, 0);
+ }
+ return point;
+ }
+
public double getScalingFactor() {
return scalingFactor;
}
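A brief sketch (not part of the patch) of the point encoding added above: a scaled_float value is multiplied by the scaling factor, rounded to a long, and written with Lucene's sortable long-point byte layout, with the infinities saturating to Long.MAX_VALUE and Long.MIN_VALUE. The scaling factor and value below are illustrative assumptions.

    import org.apache.lucene.document.LongPoint;

    // Hypothetical sketch: scale, round, and encode a value as sortable long-point bytes.
    public class ScaledFloatPointSketch {
        public static void main(String[] args) {
            double scalingFactor = 100.0;
            double value = 12.345;
            byte[] point = new byte[Long.BYTES];
            LongPoint.encodeDimension(Math.round(value * scalingFactor), point, 0);
            System.out.println(point.length); // 8 bytes, comparable with other encoded long points
        }
    }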
diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java
index 5378a6721efb2..1574621a8200e 100644
--- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java
+++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java
@@ -8,24 +8,61 @@
package org.opensearch.search.pipeline.common;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SearchPipelinePlugin;
import org.opensearch.search.pipeline.Processor;
+import org.opensearch.search.pipeline.SearchPhaseResultsProcessor;
import org.opensearch.search.pipeline.SearchRequestProcessor;
import org.opensearch.search.pipeline.SearchResponseProcessor;
+import java.util.List;
import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
/**
* Plugin providing common search request/response processors for use in search pipelines.
*/
public class SearchPipelineCommonModulePlugin extends Plugin implements SearchPipelinePlugin {
+ static final Setting<List<String>> REQUEST_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting(
+ "search.pipeline.common.request.processors.allowed",
+ List.of(),
+ Function.identity(),
+ Setting.Property.NodeScope
+ );
+
+ static final Setting<List<String>> RESPONSE_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting(
+ "search.pipeline.common.response.processors.allowed",
+ List.of(),
+ Function.identity(),
+ Setting.Property.NodeScope
+ );
+
+ static final Setting<List<String>> SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING = Setting.listSetting(
+ "search.pipeline.common.search.phase.results.processors.allowed",
+ List.of(),
+ Function.identity(),
+ Setting.Property.NodeScope
+ );
+
/**
* No constructor needed, but build complains if we don't have a constructor with JavaDoc.
*/
public SearchPipelineCommonModulePlugin() {}
+ @Override
+ public List<Setting<?>> getSettings() {
+ return List.of(
+ REQUEST_PROCESSORS_ALLOWLIST_SETTING,
+ RESPONSE_PROCESSORS_ALLOWLIST_SETTING,
+ SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING
+ );
+ }
+
/**
* Returns a map of processor factories.
*
@@ -34,25 +71,62 @@ public SearchPipelineCommonModulePlugin() {}
*/
@Override
public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcessors(Parameters parameters) {
- return Map.of(
- FilterQueryRequestProcessor.TYPE,
- new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry),
- ScriptRequestProcessor.TYPE,
- new ScriptRequestProcessor.Factory(parameters.scriptService),
- OversampleRequestProcessor.TYPE,
- new OversampleRequestProcessor.Factory()
+ return filterForAllowlistSetting(
+ REQUEST_PROCESSORS_ALLOWLIST_SETTING,
+ parameters.env.settings(),
+ Map.of(
+ FilterQueryRequestProcessor.TYPE,
+ new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry),
+ ScriptRequestProcessor.TYPE,
+ new ScriptRequestProcessor.Factory(parameters.scriptService),
+ OversampleRequestProcessor.TYPE,
+ new OversampleRequestProcessor.Factory()
+ )
);
}
@Override
public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) {
- return Map.of(
- RenameFieldResponseProcessor.TYPE,
- new RenameFieldResponseProcessor.Factory(),
- TruncateHitsResponseProcessor.TYPE,
- new TruncateHitsResponseProcessor.Factory(),
- CollapseResponseProcessor.TYPE,
- new CollapseResponseProcessor.Factory()
+ return filterForAllowlistSetting(
+ RESPONSE_PROCESSORS_ALLOWLIST_SETTING,
+ parameters.env.settings(),
+ Map.of(
+ RenameFieldResponseProcessor.TYPE,
+ new RenameFieldResponseProcessor.Factory(),
+ TruncateHitsResponseProcessor.TYPE,
+ new TruncateHitsResponseProcessor.Factory(),
+ CollapseResponseProcessor.TYPE,
+ new CollapseResponseProcessor.Factory()
+ )
);
}
+
+ @Override
+ public Map<String, Processor.Factory<SearchPhaseResultsProcessor>> getSearchPhaseResultsProcessors(Parameters parameters) {
+ return filterForAllowlistSetting(SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING, parameters.env.settings(), Map.of());
+ }
+
+ private <T extends Processor> Map<String, Processor.Factory<T>> filterForAllowlistSetting(
+ Setting<List<String>> allowlistSetting,
+ Settings settings,
+ Map<String, Processor.Factory<T>> map
+ ) {
+ if (allowlistSetting.exists(settings) == false) {
+ return Map.copyOf(map);
+ }
+ final Set<String> allowlist = Set.copyOf(allowlistSetting.get(settings));
+ // Assert that no unknown processors are defined in the allowlist
+ final Set<String> unknownAllowlistProcessors = allowlist.stream()
+ .filter(p -> map.containsKey(p) == false)
+ .collect(Collectors.toUnmodifiableSet());
+ if (unknownAllowlistProcessors.isEmpty() == false) {
+ throw new IllegalArgumentException(
+ "Processor(s) " + unknownAllowlistProcessors + " were defined in [" + allowlistSetting.getKey() + "] but do not exist"
+ );
+ }
+ return map.entrySet()
+ .stream()
+ .filter(e -> allowlist.contains(e.getKey()))
+ .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
+ }
}
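For context, a brief usage sketch (not part of the patch) of the allowlist settings defined above: a node operator lists the permitted processor types under the corresponding setting key, only those factories remain registered, and an unknown name fails with the IllegalArgumentException shown. The processor names are taken from the factories registered above.

    // Hypothetical sketch: restricting the common request processors to filter_query and script.
    Settings nodeSettings = Settings.builder()
        .putList("search.pipeline.common.request.processors.allowed", "filter_query", "script")
        .build();
    // getRequestProcessors(...) would then expose only the filter_query and script factories.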
diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java
new file mode 100644
index 0000000000000..519468ebe17ff
--- /dev/null
+++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePluginTests.java
@@ -0,0 +1,106 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.search.pipeline.common;
+
+import org.opensearch.common.settings.Settings;
+import org.opensearch.env.TestEnvironment;
+import org.opensearch.plugins.SearchPipelinePlugin;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.BiFunction;
+
+public class SearchPipelineCommonModulePluginTests extends OpenSearchTestCase {
+
+ public void testRequestProcessorAllowlist() throws IOException {
+ final String key = SearchPipelineCommonModulePlugin.REQUEST_PROCESSORS_ALLOWLIST_SETTING.getKey();
+ runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getRequestProcessors);
+ runAllowlistTest(key, List.of("filter_query"), SearchPipelineCommonModulePlugin::getRequestProcessors);
+ runAllowlistTest(key, List.of("script"), SearchPipelineCommonModulePlugin::getRequestProcessors);
+ runAllowlistTest(key, List.of("oversample", "script"), SearchPipelineCommonModulePlugin::getRequestProcessors);
+ runAllowlistTest(key, List.of("filter_query", "script", "oversample"), SearchPipelineCommonModulePlugin::getRequestProcessors);
+
+ final IllegalArgumentException e = expectThrows(
+ IllegalArgumentException.class,
+ () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getRequestProcessors)
+ );
+ assertTrue(e.getMessage(), e.getMessage().contains("foo"));
+ }
+
+ public void testResponseProcessorAllowlist() throws IOException {
+ final String key = SearchPipelineCommonModulePlugin.RESPONSE_PROCESSORS_ALLOWLIST_SETTING.getKey();
+ runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getResponseProcessors);
+ runAllowlistTest(key, List.of("rename_field"), SearchPipelineCommonModulePlugin::getResponseProcessors);
+ runAllowlistTest(key, List.of("truncate_hits"), SearchPipelineCommonModulePlugin::getResponseProcessors);
+ runAllowlistTest(key, List.of("collapse", "truncate_hits"), SearchPipelineCommonModulePlugin::getResponseProcessors);
+ runAllowlistTest(
+ key,
+ List.of("rename_field", "truncate_hits", "collapse"),
+ SearchPipelineCommonModulePlugin::getResponseProcessors
+ );
+
+ final IllegalArgumentException e = expectThrows(
+ IllegalArgumentException.class,
+ () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getResponseProcessors)
+ );
+ assertTrue(e.getMessage(), e.getMessage().contains("foo"));
+ }
+
+ public void testSearchPhaseResultsProcessorAllowlist() throws IOException {
+ final String key = SearchPipelineCommonModulePlugin.SEARCH_PHASE_RESULTS_PROCESSORS_ALLOWLIST_SETTING.getKey();
+ runAllowlistTest(key, List.of(), SearchPipelineCommonModulePlugin::getSearchPhaseResultsProcessors);
+
+ final IllegalArgumentException e = expectThrows(
+ IllegalArgumentException.class,
+ () -> runAllowlistTest(key, List.of("foo"), SearchPipelineCommonModulePlugin::getSearchPhaseResultsProcessors)
+ );
+ assertTrue(e.getMessage(), e.getMessage().contains("foo"));
+ }
+
+ private void runAllowlistTest(
+ String settingKey,
+ List<String> allowlist,
+ BiFunction<SearchPipelineCommonModulePlugin, SearchPipelinePlugin.Parameters, Map<String, ?>> function
+ ) throws IOException {
+ final Settings settings = Settings.builder().putList(settingKey, allowlist).build();
+ try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) {
+ assertEquals(Set.copyOf(allowlist), function.apply(plugin, createParameters(settings)).keySet());
+ }
+ }
+
+ public void testAllowlistNotSpecified() throws IOException {
+ final Settings settings = Settings.EMPTY;
+ try (SearchPipelineCommonModulePlugin plugin = new SearchPipelineCommonModulePlugin()) {
+ assertEquals(Set.of("oversample", "filter_query", "script"), plugin.getRequestProcessors(createParameters(settings)).keySet());
+ assertEquals(
+ Set.of("rename_field", "truncate_hits", "collapse"),
+ plugin.getResponseProcessors(createParameters(settings)).keySet()
+ );
+ assertEquals(Set.of(), plugin.getSearchPhaseResultsProcessors(createParameters(settings)).keySet());
+ }
+ }
+
+ private static SearchPipelinePlugin.Parameters createParameters(Settings settings) {
+ return new SearchPipelinePlugin.Parameters(
+ TestEnvironment.newEnvironment(Settings.builder().put(settings).put("path.home", "").build()),
+ null,
+ null,
+ null,
+ () -> 0L,
+ (a, b) -> null,
+ null,
+ null,
+ $ -> {},
+ null
+ );
+ }
+}
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
deleted file mode 100644
index faaf70c858a6e..0000000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..6784ac6c3b64f
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+b54863f578939e135d3b3aea610284ae57c188cf
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
deleted file mode 100644
index 7affbc14fa93a..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..3d86194de9213
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+a6762ec00a6d268f9980741f5b755838bcd658bf
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
deleted file mode 100644
index 07730a5606ce2..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..4ef1adb818300
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+c6ecbc452321e632bf3cea0f9758839b650455c7
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
deleted file mode 100644
index ebd1e0d52efb2..0000000000000
--- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..06c86b8fda557
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f0cca5df75bfb4f858d0435f601d8b1cae1de054
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 568c0aa2a2c03..0000000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..16cb1cce7f504
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+58210befcb31adbcadd5724966a061444db91863
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2d6050dd1e3a5..0000000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..2f70f791f65ed
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
deleted file mode 100644
index c3ee8087a8b5d..0000000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..621cbf58f3133
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+3493179999f211dc49714319f81da2be86523a3b
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
deleted file mode 100644
index 32c8fa2b876a2..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..ac96e7545ed58
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+24e97cf14ea9d80afe4c5ab69066b587fccc154a
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2c468962b1b64..0000000000000
--- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..0847ac3034db7
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+acafc128cddafa021bc0b48b0788eb0e118add5e
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
deleted file mode 100644
index 6f0501d3312ae..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5c7f2d8eab0fca3fdc3d3e57a7f48a335dc7ac33
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..80e254ed3d098
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+04436942995a4952ce5654126dfb767d6335674e
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
deleted file mode 100644
index 25031381c9cb3..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-efcf65dda1b4e9d7e83926fd5895a47e491cbf29
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..3baed2a6e660b
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+85918e24fc3bf63fcd953807ab2eb3fa55c987c2
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1
deleted file mode 100644
index e27d45b217dad..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-59599d7b8bed2e6bd27d0dad7935c078b98c39cc
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..4e9327112d412
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+15e425e9cc0ab9d65fac3c919199a24dfa3631eb
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1
deleted file mode 100644
index ad5473865537d..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e55f83bb373ac139e313f64e80afe1eb0a75b8c0
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..7e7e9fe5b22b4
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+3d16c18348e7d4a00cb83100c43f3e21239d224e
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1
deleted file mode 100644
index 68abd162e7266..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1be59d91c45a4de069611fb7f8aa3e8fd26020ec
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..98e0ecc9cbb89
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+2ef6d9dffc6816d3cd04a54fe1ee43e13f850a37
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1
deleted file mode 100644
index c5f1521ec3769..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d5b5922acf3743b5a0c542959dd93fca8be333a7
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..ef675f2b9702e
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+e72b2262f5393d9ff255fb901297d4e6790e9102
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1
deleted file mode 100644
index b676ca507467a..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-50fd7b471cbdd6648c4972169f3fc67fae9db7f6
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..d8bbac27fd360
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+416ac44b2e76592c9e85338798cae93c3cf5475e
\ No newline at end of file
diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 61e9f71712eaf..0f822a02e05d8 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -47,7 +47,7 @@ dependencies {
api 'com.azure:azure-core:1.49.1'
api 'com.azure:azure-json:1.1.0'
api 'com.azure:azure-xml:1.0.0'
- api 'com.azure:azure-storage-common:12.21.2'
+ api 'com.azure:azure-storage-common:12.25.1'
api 'com.azure:azure-core-http-netty:1.15.1'
api "io.netty:netty-codec-dns:${versions.netty}"
api "io.netty:netty-codec-socks:${versions.netty}"
@@ -57,19 +57,19 @@ dependencies {
api "io.netty:netty-transport-native-unix-common:${versions.netty}"
implementation project(':modules:transport-netty4')
api 'com.azure:azure-storage-blob:12.23.0'
- api 'com.azure:azure-identity:1.11.4'
+ api 'com.azure:azure-identity:1.13.0'
// Start of transitive dependencies for azure-identity
- api 'com.microsoft.azure:msal4j-persistence-extension:1.2.0'
+ api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0'
api "net.java.dev.jna:jna-platform:${versions.jna}"
- api 'com.microsoft.azure:msal4j:1.14.3'
+ api 'com.microsoft.azure:msal4j:1.16.0'
api 'com.nimbusds:oauth2-oidc-sdk:11.9.1'
- api 'com.nimbusds:nimbus-jose-jwt:9.37.3'
+ api 'com.nimbusds:nimbus-jose-jwt:9.40'
api 'com.nimbusds:content-type:2.3'
api 'com.nimbusds:lang-tag:1.7'
// Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart,
// selected the higher version which is 2.5.0
api 'net.minidev:json-smart:2.5.0'
- api 'net.minidev:accessors-smart:2.5.0'
+ api 'net.minidev:accessors-smart:2.5.1'
api "org.ow2.asm:asm:${versions.asm}"
// End of transitive dependencies for azure-identity
api "io.projectreactor.netty:reactor-netty-core:${versions.reactor_netty}"
@@ -219,11 +219,6 @@ thirdPartyAudit {
'org.bouncycastle.cert.X509CertificateHolder',
'org.bouncycastle.cert.jcajce.JcaX509CertificateHolder',
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
- 'org.bouncycastle.crypto.InvalidCipherTextException',
- 'org.bouncycastle.crypto.engines.AESEngine',
- 'org.bouncycastle.crypto.modes.GCMBlockCipher',
- 'org.bouncycastle.jcajce.provider.BouncyCastleFipsProvider',
- 'org.bouncycastle.jce.provider.BouncyCastleProvider',
'org.bouncycastle.openssl.PEMKeyPair',
'org.bouncycastle.openssl.PEMParser',
'org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter',
diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1
deleted file mode 100644
index 1578c94fcdc7b..0000000000000
--- a/plugins/repository-azure/licenses/accessors-smart-2.5.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-aca011492dfe9c26f4e0659028a4fe0970829dd8
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1 b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1
new file mode 100644
index 0000000000000..8f7452437323d
--- /dev/null
+++ b/plugins/repository-azure/licenses/accessors-smart-2.5.1.jar.sha1
@@ -0,0 +1 @@
+19b820261eb2e7de7d5bde11d1c06e4501dd7e5f
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-identity-1.11.4.jar.sha1 b/plugins/repository-azure/licenses/azure-identity-1.11.4.jar.sha1
deleted file mode 100644
index c8d98ba9c8ad2..0000000000000
--- a/plugins/repository-azure/licenses/azure-identity-1.11.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-59b5ce48888f638b80d85ef5aa0e22a265d3dc89
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-identity-1.13.0.jar.sha1 b/plugins/repository-azure/licenses/azure-identity-1.13.0.jar.sha1
new file mode 100644
index 0000000000000..b59c2a3be5c92
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-identity-1.13.0.jar.sha1
@@ -0,0 +1 @@
+54b44a74636322d06e9dc42d611a9f12a0966790
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1
deleted file mode 100644
index b3c73774764df..0000000000000
--- a/plugins/repository-azure/licenses/azure-storage-common-12.21.2.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d2676d4fc40a501bd5d0437b8d2bfb9926022bea
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1
new file mode 100644
index 0000000000000..822a60d81ca27
--- /dev/null
+++ b/plugins/repository-azure/licenses/azure-storage-common-12.25.1.jar.sha1
@@ -0,0 +1 @@
+96e2df76ce9a8fa084ae289bb59295d565f2b8d5
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-1.14.3.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.14.3.jar.sha1
deleted file mode 100644
index 2a6e42e3f2b48..0000000000000
--- a/plugins/repository-azure/licenses/msal4j-1.14.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-117b28c41bd760f979ed1b6467c5ec491f0d4d60
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-1.16.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-1.16.0.jar.sha1
new file mode 100644
index 0000000000000..29fe5022a1570
--- /dev/null
+++ b/plugins/repository-azure/licenses/msal4j-1.16.0.jar.sha1
@@ -0,0 +1 @@
+708a0a986ed091054f1c08866712e5b41aec6700
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-persistence-extension-1.2.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-persistence-extension-1.2.0.jar.sha1
deleted file mode 100644
index cfcf7548b7694..0000000000000
--- a/plugins/repository-azure/licenses/msal4j-persistence-extension-1.2.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-1111a95878de8745ddc9de132df18ebd9ca7024d
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/msal4j-persistence-extension-1.3.0.jar.sha1 b/plugins/repository-azure/licenses/msal4j-persistence-extension-1.3.0.jar.sha1
new file mode 100644
index 0000000000000..9c5909e7ff240
--- /dev/null
+++ b/plugins/repository-azure/licenses/msal4j-persistence-extension-1.3.0.jar.sha1
@@ -0,0 +1 @@
+8a8ef1517d27a5b4de1512ef94679bdb59f210b6
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
deleted file mode 100644
index c4ca8f15e85c5..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-381c5bf8b7570c163fa7893a26d02b7ac36ff6eb
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..5e3f819012811
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
deleted file mode 100644
index ebd1e0d52efb2..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..06c86b8fda557
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f0cca5df75bfb4f858d0435f601d8b1cae1de054
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1
deleted file mode 100644
index 9f6e95ba38d2e..0000000000000
--- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4d54c8d5b95b14756043efb59b8c3e62ec67aa43
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..226ee06d39d6c
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+ea52ef6617a9b69b0baaebb7f0b80373527f9607
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1
deleted file mode 100644
index f31396d94c2ec..0000000000000
--- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b7fb401dd47c79e6b99f2319ac3b561c50c31c30
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..dcc2b0c7ca923
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+1e459c8630bb7c942b79a97e62dd728798de6a8c
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
deleted file mode 100644
index 18d122acd2c44..0000000000000
--- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3e687cdc4ecdbbad07508a11b715bdf95fa20939
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..b22ad6784809b
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+5ac6a3d96935129ba45ea768ad30e31cad0d8c4d
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2c468962b1b64..0000000000000
--- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..0847ac3034db7
--- /dev/null
+++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+acafc128cddafa021bc0b48b0788eb0e118add5e
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.37.3.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.37.3.jar.sha1
deleted file mode 100644
index 7278cd8994f71..0000000000000
--- a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.37.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-700f71ffefd60c16bd8ce711a956967ea9071cec
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1
new file mode 100644
index 0000000000000..83228caf233cc
--- /dev/null
+++ b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1
@@ -0,0 +1 @@
+42b1dfa0360e4062951b070bac52dd8d96fd7b38
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1
deleted file mode 100644
index cbcbfcd87d682..0000000000000
--- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.19.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-639e2c63ade6f2a49d7e501ca2264b74d240b448
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1
new file mode 100644
index 0000000000000..2f4d023c88c80
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1
@@ -0,0 +1 @@
+1a5ef52a470a82d9313e2e1ad8ba064bdbd38948
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1
deleted file mode 100644
index 1eeedfc0926f5..0000000000000
--- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.19.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1
new file mode 100644
index 0000000000000..6c031e00e39c1
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1
@@ -0,0 +1 @@
+8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46
\ No newline at end of file
diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
index f39ed185d8b35..4f30247f0af08 100644
--- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
+++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
@@ -141,6 +141,9 @@ public Void run() {
// - https://github.com/Azure/azure-sdk-for-java/pull/25004
// - https://github.com/Azure/azure-sdk-for-java/pull/24374
Configuration.getGlobalConfiguration().put("AZURE_JACKSON_ADAPTER_USE_ACCESS_HELPER", "true");
+ // See please:
+ // - https://github.com/Azure/azure-sdk-for-java/issues/37464
+ Configuration.getGlobalConfiguration().put("AZURE_ENABLE_SHUTDOWN_HOOK_WITH_PRIVILEGE", "true");
}
public AzureStorageService(Settings settings) {
diff --git a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
index e8fbe35ebab1d..eedcfd98da150 100644
--- a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
@@ -38,6 +38,7 @@ grant {
permission java.lang.RuntimePermission "accessDeclaredMembers";
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
permission java.lang.RuntimePermission "setContextClassLoader";
+ permission java.lang.RuntimePermission "shutdownHooks";
// azure client set Authenticator for proxy username/password
permission java.net.NetPermission "setDefaultAuthenticator";
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index eb50bd2d0615a..63eb783649884 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -74,7 +74,7 @@ dependencies {
api "commons-codec:commons-codec:${versions.commonscodec}"
api 'commons-collections:commons-collections:3.2.2'
api "org.apache.commons:commons-compress:${versions.commonscompress}"
- api 'org.apache.commons:commons-configuration2:2.10.1'
+ api 'org.apache.commons:commons-configuration2:2.11.0'
api "commons-io:commons-io:${versions.commonsio}"
api 'org.apache.commons:commons-lang3:3.14.0'
implementation 'com.google.re2j:re2j:1.7'
diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1
deleted file mode 100644
index d4c0f8417d357..0000000000000
--- a/plugins/repository-hdfs/licenses/commons-configuration2-2.10.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2b681b3bcddeaa5bf5c2a2939cd77e2f9ad6efda
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1
new file mode 100644
index 0000000000000..eea24804c5228
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/commons-configuration2-2.11.0.jar.sha1
@@ -0,0 +1 @@
+af5a2c6abe587074c0be1107fcb27fa2fad91304
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1
deleted file mode 100644
index 8f8d86e6065b2..0000000000000
--- a/plugins/repository-hdfs/licenses/netty-all-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-db3f4d3ad3d16e26991a64d50b749ae09e0e0c8e
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..076124a7d1f89
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/netty-all-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+8fba10bb4911517eb1bdcc05ef392499dda4d5ac
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1
deleted file mode 100644
index faaf70c858a6e..0000000000000
--- a/plugins/repository-s3/licenses/netty-buffer-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..6784ac6c3b64f
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-buffer-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+b54863f578939e135d3b3aea610284ae57c188cf
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1
deleted file mode 100644
index 7affbc14fa93a..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..3d86194de9213
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+a6762ec00a6d268f9980741f5b755838bcd658bf
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1
deleted file mode 100644
index 07730a5606ce2..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..4ef1adb818300
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+c6ecbc452321e632bf3cea0f9758839b650455c7
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
deleted file mode 100644
index ebd1e0d52efb2..0000000000000
--- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..06c86b8fda557
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f0cca5df75bfb4f858d0435f601d8b1cae1de054
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 568c0aa2a2c03..0000000000000
--- a/plugins/repository-s3/licenses/netty-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..16cb1cce7f504
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+58210befcb31adbcadd5724966a061444db91863
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2d6050dd1e3a5..0000000000000
--- a/plugins/repository-s3/licenses/netty-handler-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..2f70f791f65ed
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-handler-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1
deleted file mode 100644
index c3ee8087a8b5d..0000000000000
--- a/plugins/repository-s3/licenses/netty-resolver-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..621cbf58f3133
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-resolver-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+3493179999f211dc49714319f81da2be86523a3b
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1
deleted file mode 100644
index 32c8fa2b876a2..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..ac96e7545ed58
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+24e97cf14ea9d80afe4c5ab69066b587fccc154a
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1
deleted file mode 100644
index 408f3aa5d1339..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3ca1cff0bf82bfd38e89f6946e54f24cbb3424a2
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..97001777eadf5
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+8b97d32eb1489043e478deea99bd93ce487b82f6
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2c468962b1b64..0000000000000
--- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..0847ac3034db7
--- /dev/null
+++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+acafc128cddafa021bc0b48b0788eb0e118add5e
\ No newline at end of file
diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle
index 735cbd92b691a..66d172e3dc7f3 100644
--- a/plugins/telemetry-otel/build.gradle
+++ b/plugins/telemetry-otel/build.gradle
@@ -37,7 +37,7 @@ dependencies {
runtimeOnly "com.squareup.okhttp3:okhttp:4.11.0"
runtimeOnly "com.squareup.okio:okio-jvm:3.5.0"
runtimeOnly "io.opentelemetry:opentelemetry-exporter-sender-okhttp:${versions.opentelemetry}"
- api "io.opentelemetry:opentelemetry-extension-incubator:${versions.opentelemetry}-alpha"
+ api "io.opentelemetry:opentelemetry-api-incubator:${versions.opentelemetry}-alpha"
testImplementation "io.opentelemetry:opentelemetry-sdk-testing:${versions.opentelemetry}"
}
@@ -48,7 +48,9 @@ thirdPartyAudit {
'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerIndexField',
'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerLimitField',
'io.opentelemetry.internal.shaded.jctools.util.UnsafeAccess',
- 'io.opentelemetry.internal.shaded.jctools.util.UnsafeRefArrayAccess'
+ 'io.opentelemetry.internal.shaded.jctools.util.UnsafeRefArrayAccess',
+ 'io.opentelemetry.exporter.internal.marshal.UnsafeAccess',
+ 'io.opentelemetry.exporter.internal.marshal.UnsafeAccess$UnsafeHolder'
)
ignoreMissingClasses(
@@ -78,10 +80,6 @@ thirdPartyAudit {
'org.conscrypt.ConscryptHostnameVerifier',
'org.openjsse.javax.net.ssl.SSLParameters',
'org.openjsse.javax.net.ssl.SSLSocket',
- 'io.opentelemetry.api.events.EventBuilder',
- 'io.opentelemetry.api.events.EventEmitter',
- 'io.opentelemetry.api.events.EventEmitterBuilder',
- 'io.opentelemetry.api.events.EventEmitterProvider',
'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties',
'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider',
'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider',
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1
deleted file mode 100644
index b577500d71e1d..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-59470f4aa3a9207f21936461b8fdcb36d46455ab
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..415fe8f3d8aaa
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1
@@ -0,0 +1 @@
+396b89a66526bd5694ad3bef4604b876177e0b44
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1
new file mode 100644
index 0000000000000..9c3c9f43d153c
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1
@@ -0,0 +1 @@
+1a1fd96155e1b58726300bbf8457630713035e51
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-LICENSE.txt
similarity index 100%
rename from plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-LICENSE.txt
rename to plugins/telemetry-otel/licenses/opentelemetry-api-incubator-LICENSE.txt
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-NOTICE.txt
similarity index 100%
rename from plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-NOTICE.txt
rename to plugins/telemetry-otel/licenses/opentelemetry-api-incubator-NOTICE.txt
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1
deleted file mode 100644
index d3156577248d5..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8850bc4c65d0fd22ff987b4683206ec4e69f2689
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..115d4ccb1f34b
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1
@@ -0,0 +1 @@
+f0601fb1c06f661afeffbc73a1dbe29797b2f13b
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1
deleted file mode 100644
index f176b21d12dc4..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8d1cb823ab18fa871a1549e7c522bf28f2b3d8fe
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..a10b92995becd
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1
@@ -0,0 +1 @@
+570d71e39e36fe2caad142557bde0c11fcdb3b92
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1
deleted file mode 100644
index cd25e0ab9f294..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-bc045cae89ff6f18071760f6e4659dd880e88a1b
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..f43393104296a
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1
@@ -0,0 +1 @@
+f5b528f8d6f8531836eabba698979516964b24ed
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1
deleted file mode 100644
index fabb394f9c2e0..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5ee49902ba884d6c3e48499a9311a624396d9630
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..5adba2ba0f342
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1
@@ -0,0 +1 @@
+04fc0e4983253ea58430c3d24b6b3c5c95f84dc9
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1
deleted file mode 100644
index 378ba4d43dcd1..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2706e3b883d2bcd1a6b3e0bb4118ffbd7820550b
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..ea9c293f25025
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1
@@ -0,0 +1 @@
+a2b8571e36b11c3153d31ec87ec69cc168af8036
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1
deleted file mode 100644
index a3d7e15e1a624..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dcc924787b559278697b74dbc5bb6d046b236ef6
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..dcf23f16ac89f
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1
@@ -0,0 +1 @@
+1a8947a2e28924ad9374e319150a23837926ca4b
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1
deleted file mode 100644
index 71ab3e184db9e..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.36.0-alpha.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d58f7c669e371f6ff61b705770af9a3c1f31df52
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1
deleted file mode 100644
index c9a75d1b4350a..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4056d1b562b4da7720817d8af15d1d3ccdf4b776
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..f603af04d8012
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1
@@ -0,0 +1 @@
+ba9afdf3ef1ea51e42999fd68c959e3ceb219399
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1
deleted file mode 100644
index c31584f59c0d8..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-11d6f8c7b029efcb5c6c449cadef155b781afb78
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..f9419f6ccfbee
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1
@@ -0,0 +1 @@
+fb8168627bf0059445f61081eaa47c4ab787fc2e
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1
deleted file mode 100644
index a134bb06ec635..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-98e94479db1e68c4779efc44bf6b4fca83e98b54
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..63269f239eacd
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1
@@ -0,0 +1 @@
+b6b45155399bc9fa563945f3e3a77416d7165948
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1
deleted file mode 100644
index d146241f52f29..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4f8f5d30c3eeede7b2260d979d9f403cfa381c3d
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..f18c8259c1adc
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1
@@ -0,0 +1 @@
+522d46926cc06a4c18829da7e4c4340bdf5673c3
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1
deleted file mode 100644
index 802761e38846c..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.36.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e3068cbaedfac6a28c6483923982b2efb861d3f4
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1
new file mode 100644
index 0000000000000..03b81424f46d5
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1
@@ -0,0 +1 @@
+0b72722a5bbea5f46319bf08b2caed5b8f987a92
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1
deleted file mode 100644
index e730c83af905e..0000000000000
--- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.23.1-alpha.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-218e361772670212a46be5940010222d68e66f2a
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1
new file mode 100644
index 0000000000000..7cf8e7e8ede28
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1
@@ -0,0 +1 @@
+76b3d4ca0a8f20b27c1590ceece54f0c7fb5857e
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1
deleted file mode 100644
index faaf70c858a6e..0000000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..6784ac6c3b64f
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+b54863f578939e135d3b3aea610284ae57c188cf
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1
deleted file mode 100644
index 7affbc14fa93a..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..3d86194de9213
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+a6762ec00a6d268f9980741f5b755838bcd658bf
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1
deleted file mode 100644
index 07730a5606ce2..0000000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..4ef1adb818300
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+c6ecbc452321e632bf3cea0f9758839b650455c7
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 568c0aa2a2c03..0000000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..16cb1cce7f504
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+58210befcb31adbcadd5724966a061444db91863
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2d6050dd1e3a5..0000000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..2f70f791f65ed
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1
deleted file mode 100644
index c3ee8087a8b5d..0000000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..621cbf58f3133
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+3493179999f211dc49714319f81da2be86523a3b
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1
deleted file mode 100644
index 32c8fa2b876a2..0000000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..ac96e7545ed58
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+24e97cf14ea9d80afe4c5ab69066b587fccc154a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
deleted file mode 100644
index faaf70c858a6e..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3d918a9ee057d995c362902b54634fc307132aac
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..6784ac6c3b64f
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+b54863f578939e135d3b3aea610284ae57c188cf
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
deleted file mode 100644
index 7affbc14fa93a..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f1fa43b03e93ab88e805b6a4e3e83780c80b47d2
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..3d86194de9213
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+a6762ec00a6d268f9980741f5b755838bcd658bf
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
deleted file mode 100644
index c4ca8f15e85c5..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-381c5bf8b7570c163fa7893a26d02b7ac36ff6eb
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..5e3f819012811
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f988dbb527efb0e7cf7d444cc50b0fc3f5f380ec
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
deleted file mode 100644
index 07730a5606ce2..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d05cd927209ea25bbf342962c00b8e5a828c2a4
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..4ef1adb818300
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+c6ecbc452321e632bf3cea0f9758839b650455c7
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
deleted file mode 100644
index ebd1e0d52efb2..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e0849843eb5b1c036b12551baca98a9f7ff847a0
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..06c86b8fda557
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+f0cca5df75bfb4f858d0435f601d8b1cae1de054
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 568c0aa2a2c03..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ec361e7e025c029be50c55c8480080cabcbc01e7
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..16cb1cce7f504
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+58210befcb31adbcadd5724966a061444db91863
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2d6050dd1e3a5..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-168db749c22652ee7fed1ebf7ec46ce856d75e51
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..2f70f791f65ed
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+2bc6a58ad2e9e279634b6e55022e8dcd3c175cc4
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
deleted file mode 100644
index c3ee8087a8b5d..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-66c15921104cda0159b34e316541bc765dfaf3c0
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..621cbf58f3133
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+3493179999f211dc49714319f81da2be86523a3b
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
deleted file mode 100644
index 18d122acd2c44..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3e687cdc4ecdbbad07508a11b715bdf95fa20939
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..b22ad6784809b
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+5ac6a3d96935129ba45ea768ad30e31cad0d8c4d
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
deleted file mode 100644
index 32c8fa2b876a2..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b91f04c39ac14d6a29d07184ef305953ee6e0348
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..ac96e7545ed58
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+24e97cf14ea9d80afe4c5ab69066b587fccc154a
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
deleted file mode 100644
index 2c468962b1b64..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.110.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a7096e7c0a25a983647909d7513f5d4943d589c0
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
new file mode 100644
index 0000000000000..0847ac3034db7
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.111.Final.jar.sha1
@@ -0,0 +1 @@
+acafc128cddafa021bc0b48b0788eb0e118add5e
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1
deleted file mode 100644
index cbcbfcd87d682..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.19.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-639e2c63ade6f2a49d7e501ca2264b74d240b448
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1
new file mode 100644
index 0000000000000..2f4d023c88c80
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1
@@ -0,0 +1 @@
+1a5ef52a470a82d9313e2e1ad8ba064bdbd38948
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1
deleted file mode 100644
index 1eeedfc0926f5..0000000000000
--- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.19.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b4bbb1aeb64ecb2b3949c38983032a7f0b0ebd07
\ No newline at end of file
diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1
new file mode 100644
index 0000000000000..6c031e00e39c1
--- /dev/null
+++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1
@@ -0,0 +1 @@
+8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46
\ No newline at end of file
diff --git a/release-notes/opensearch.release-notes-2.15.0.md b/release-notes/opensearch.release-notes-2.15.0.md
new file mode 100644
index 0000000000000..e3b7cfc0558f3
--- /dev/null
+++ b/release-notes/opensearch.release-notes-2.15.0.md
@@ -0,0 +1,73 @@
+## 2024-06-12 Version 2.15.0 Release Notes
+
+## [2.15.0]
+### Added
+- Add leader and follower check failure counter metrics ([#12439](https://github.com/opensearch-project/OpenSearch/pull/12439))
+- Add latency metrics for instrumenting critical clusterManager code paths ([#12333](https://github.com/opensearch-project/OpenSearch/pull/12333))
+- Add support for Azure Managed Identity in repository-azure ([#12423](https://github.com/opensearch-project/OpenSearch/issues/12423))
+- Add useCompoundFile index setting ([#13478](https://github.com/opensearch-project/OpenSearch/pull/13478))
+- Make outbound side of transport protocol dependent ([#13293](https://github.com/opensearch-project/OpenSearch/pull/13293))
+- [Remote Store] Upload translog checkpoint as object metadata to translog.tlog ([#13637](https://github.com/opensearch-project/OpenSearch/pull/13637))
+- [Remote Store] Add dynamic cluster settings to set timeout for segments upload to Remote Store ([#13679](https://github.com/opensearch-project/OpenSearch/pull/13679))
+- Add getMetadataFields to MapperService ([#13819](https://github.com/opensearch-project/OpenSearch/pull/13819))
+- Add "wildcard" field type that supports efficient wildcard, prefix, and regexp queries ([#13461](https://github.com/opensearch-project/OpenSearch/pull/13461))
+- Allow setting query parameters on requests ([#13776](https://github.com/opensearch-project/OpenSearch/issues/13776))
+- Add dynamic action retry timeout setting ([#14022](https://github.com/opensearch-project/OpenSearch/issues/14022))
+- Add capability to disable source recovery_source for an index ([#13590](https://github.com/opensearch-project/OpenSearch/pull/13590))
+- Add remote routing table for remote state publication with experimental feature flag ([#13304](https://github.com/opensearch-project/OpenSearch/pull/13304))
+- Add upload flow for writing routing table to remote store ([#13870](https://github.com/opensearch-project/OpenSearch/pull/13870))
+- [Remote Store] Add support to disable flush based on translog reader count ([#14027](https://github.com/opensearch-project/OpenSearch/pull/14027))
+- Add recovery chunk size setting ([#13997](https://github.com/opensearch-project/OpenSearch/pull/13997))
+- [Query Insights] Add exporter support for top n queries ([#12982](https://github.com/opensearch-project/OpenSearch/pull/12982))
+- [Query Insights] Add X-Opaque-Id to search request metadata for top n queries ([#13374](https://github.com/opensearch-project/OpenSearch/pull/13374))
+- [Streaming Indexing] Enhance RestAction with request / response streaming support ([#13772](https://github.com/opensearch-project/OpenSearch/pull/13772))
+- Move Remote Store Migration from DocRep to GA and modify remote migration settings name ([#14100](https://github.com/opensearch-project/OpenSearch/pull/14100))
+- [Remote State] Add async remote state deletion task running on an interval, configurable by a setting ([#13995](https://github.com/opensearch-project/OpenSearch/pull/13995))
+- Add support for query level resource usage tracking ([#13172](https://github.com/opensearch-project/OpenSearch/pull/13172))
+- [Query Insights] Add cpu and memory metrics to top n queries ([#13739](https://github.com/opensearch-project/OpenSearch/pull/13739))
+- Derived field object type support ([#13720](https://github.com/opensearch-project/OpenSearch/pull/13720))
+- Support Dynamic Pruning in Cardinality Aggregation ([#13821](https://github.com/opensearch-project/OpenSearch/pull/13821))
+
+### Dependencies
+- Bump `com.github.spullara.mustache.java:compiler` from 0.9.10 to 0.9.13 ([#13329](https://github.com/opensearch-project/OpenSearch/pull/13329), [#13559](https://github.com/opensearch-project/OpenSearch/pull/13559))
+- Bump `org.apache.commons:commons-text` from 1.11.0 to 1.12.0 ([#13557](https://github.com/opensearch-project/OpenSearch/pull/13557))
+- Bump `org.hdrhistogram:HdrHistogram` from 2.1.12 to 2.2.2 ([#13556](https://github.com/opensearch-project/OpenSearch/pull/13556), [#13986](https://github.com/opensearch-project/OpenSearch/pull/13986))
+- Bump `com.gradle.enterprise` from 3.17.2 to 3.17.4 ([#13641](https://github.com/opensearch-project/OpenSearch/pull/13641), [#13753](https://github.com/opensearch-project/OpenSearch/pull/13753))
+- Bump `org.apache.hadoop:hadoop-minicluster` from 3.3.6 to 3.4.0 ([#13642](https://github.com/opensearch-project/OpenSearch/pull/13642))
+- Bump `mockito` from 5.11.0 to 5.12.0 ([#13665](https://github.com/opensearch-project/OpenSearch/pull/13665))
+- Bump `com.google.code.gson:gson` from 2.10.1 to 2.11.0 ([#13752](https://github.com/opensearch-project/OpenSearch/pull/13752))
+- Bump `ch.qos.logback:logback-core` from 1.5.3 to 1.5.6 ([#13756](https://github.com/opensearch-project/OpenSearch/pull/13756))
+- Bump `netty` from 4.1.109.Final to 4.1.110.Final ([#13802](https://github.com/opensearch-project/OpenSearch/pull/13802))
+- Bump `jackson` from 2.17.0 to 2.17.1 ([#13817](https://github.com/opensearch-project/OpenSearch/pull/13817))
+- Bump `reactor` from 3.5.15 to 3.5.17 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
+- Bump `reactor-netty` from 1.1.17 to 1.1.19 ([#13825](https://github.com/opensearch-project/OpenSearch/pull/13825))
+- Bump `commons-cli:commons-cli` from 1.7.0 to 1.8.0 ([#13840](https://github.com/opensearch-project/OpenSearch/pull/13840))
+- Bump `org.apache.xmlbeans:xmlbeans` from 5.2.0 to 5.2.1 ([#13839](https://github.com/opensearch-project/OpenSearch/pull/13839))
+- Bump `actions/checkout` from 3 to 4 ([#13935](https://github.com/opensearch-project/OpenSearch/pull/13935))
+- Bump `com.netflix.nebula.ospackage-base` from 11.9.0 to 11.9.1 ([#13933](https://github.com/opensearch-project/OpenSearch/pull/13933))
+- Bump `com.azure:azure-core-http-netty` from 1.12.8 to 1.15.1 ([#14128](https://github.com/opensearch-project/OpenSearch/pull/14128))
+- Bump `tim-actions/get-pr-commits` from 1.1.0 to 1.3.1 ([#14126](https://github.com/opensearch-project/OpenSearch/pull/14126))
+
+### Changed
+- Add ability for Boolean and date field queries to run when only doc_values are enabled ([#11650](https://github.com/opensearch-project/OpenSearch/pull/11650))
+- Refactor implementations of query phase searcher, allow QueryCollectorContext to have zero collectors ([#13481](https://github.com/opensearch-project/OpenSearch/pull/13481))
+- Add support to inject telemetry instances to plugins ([#13636](https://github.com/opensearch-project/OpenSearch/pull/13636))
+- Add support to provide tags with value in Gauge metric ([#13994](https://github.com/opensearch-project/OpenSearch/pull/13994))
+- Move cache removal notifications outside lru lock ([#14017](https://github.com/opensearch-project/OpenSearch/pull/14017))
+
+### Removed
+- Remove handling of index.mapper.dynamic in AutoCreateIndex ([#13067](https://github.com/opensearch-project/OpenSearch/pull/13067))
+
+### Fixed
+- Fix get field mapping API returns 404 error in mixed cluster with multiple versions ([#13624](https://github.com/opensearch-project/OpenSearch/pull/13624))
+- Allow clearing `remote_store.compatibility_mode` setting ([#13646](https://github.com/opensearch-project/OpenSearch/pull/13646))
+- Painless: ensure type "UnmodifiableMap" for params ([#13885](https://github.com/opensearch-project/OpenSearch/pull/13885))
+- Don't return negative scores from `multi_match` query with `cross_fields` type ([#13829](https://github.com/opensearch-project/OpenSearch/pull/13829))
+- Pass parent filter to inner hit query ([#13903](https://github.com/opensearch-project/OpenSearch/pull/13903))
+- Fix NPE on restore searchable snapshot ([#13911](https://github.com/opensearch-project/OpenSearch/pull/13911))
+- Fix double invocation of postCollection when MultiBucketCollector is present ([#14015](https://github.com/opensearch-project/OpenSearch/pull/14015))
+- Fix ReplicaShardBatchAllocator to batch shards without duplicates ([#13710](https://github.com/opensearch-project/OpenSearch/pull/13710))
+- Fix Java high-level REST client bulk() not respecting the bulkRequest.requireAlias(true) method call ([#14146](https://github.com/opensearch-project/OpenSearch/pull/14146))
+- Fix ShardNotFoundException during request cache clean up ([#14219](https://github.com/opensearch-project/OpenSearch/pull/14219))
+- Fix Concurrent Modification Exception in Indices Request Cache ([#14032](https://github.com/opensearch-project/OpenSearch/pull/14221))
+- Fix the rewrite method for MatchOnlyText field query ([#14248](https://github.com/opensearch-project/OpenSearch/pull/14248))
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
index fa71137912a91..a75b1d0eac793 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml
@@ -658,6 +658,7 @@ setup:
settings:
number_of_replicas: 0
number_of_shards: 1
+ refresh_interval: -1
mappings:
properties:
date:
@@ -677,6 +678,11 @@ setup:
- '{"index": {}}'
- '{"date": "2016-03-01"}'
+ - do:
+ indices.forcemerge:
+ index: test_2
+ max_num_segments: 1
+
- do:
search:
index: test_2
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
index 3a0099dae3b33..ade9eb3eee0dc 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml
@@ -1083,6 +1083,7 @@ setup:
settings:
number_of_replicas: 0
number_of_shards: 1
+ refresh_interval: -1
mappings:
properties:
date:
@@ -1100,6 +1101,12 @@ setup:
- '{"date": "2016-02-01"}'
- '{"index": {}}'
- '{"date": "2016-03-01"}'
+
+ - do:
+ indices.forcemerge:
+ index: test_2
+ max_num_segments: 1
+
- do:
search:
index: test_2
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
index 1356eac41ae79..0897e0bdd894b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/330_auto_date_histogram.yml
@@ -106,8 +106,41 @@ setup:
version: " - 2.99.99"
reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.14.0)
+ - do:
+ indices.create:
+ index: test_profile
+ body:
+ settings:
+ number_of_shards: 1
+ number_of_replicas: 0
+ refresh_interval: -1
+ mappings:
+ properties:
+ date:
+ type: date
+
+ - do:
+ bulk:
+ index: test_profile
+ refresh: true
+ body:
+ - '{"index": {}}'
+ - '{"date": "2020-03-01", "v": 1}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-02", "v": 2}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-08", "v": 3}'
+ - '{"index": {}}'
+ - '{"date": "2020-03-09", "v": 4}'
+
+ - do:
+ indices.forcemerge:
+ index: test_profile
+ max_num_segments: 1
+
- do:
search:
+ index: test_profile
body:
profile: true
size: 0
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml
index 7d887d56ae8fe..80aad96ce1f6b 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml
@@ -14,6 +14,9 @@ setup:
date:
type: date
format: epoch_second
+ scaled_field:
+ type: scaled_float
+ scaling_factor: 100
- do:
cluster.health:
@@ -528,3 +531,145 @@ setup:
- is_false: aggregations.unsigned_long_range.buckets.2.to
- match: { aggregations.unsigned_long_range.buckets.2.doc_count: 0 }
+
+---
+"Double range profiler shows filter rewrite info":
+ - skip:
+ version: " - 2.99.99"
+ reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.15.0)
+
+ - do:
+ indices.create:
+ index: test_profile
+ body:
+ settings:
+ number_of_replicas: 0
+ number_of_shards: 1
+ refresh_interval: -1
+ mappings:
+ properties:
+ ip:
+ type: ip
+ double:
+ type: double
+ date:
+ type: date
+ format: epoch_second
+
+ - do:
+ bulk:
+ index: test_profile
+ refresh: true
+ body:
+ - '{"index": {}}'
+ - '{"double" : 42}'
+ - '{"index": {}}'
+ - '{"double" : 100}'
+ - '{"index": {}}'
+ - '{"double" : 50}'
+
+ - do:
+ indices.forcemerge:
+ index: test_profile
+ max_num_segments: 1
+
+ - do:
+ search:
+ index: test_profile
+ body:
+ size: 0
+ profile: true
+ aggs:
+ double_range:
+ range:
+ field: double
+ ranges:
+ - to: 50
+ - from: 50
+ to: 150
+ - from: 150
+
+ - length: { aggregations.double_range.buckets: 3 }
+
+ - match: { aggregations.double_range.buckets.0.key: "*-50.0" }
+ - is_false: aggregations.double_range.buckets.0.from
+ - match: { aggregations.double_range.buckets.0.to: 50.0 }
+ - match: { aggregations.double_range.buckets.0.doc_count: 1 }
+ - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" }
+ - match: { aggregations.double_range.buckets.1.from: 50.0 }
+ - match: { aggregations.double_range.buckets.1.to: 150.0 }
+ - match: { aggregations.double_range.buckets.1.doc_count: 2 }
+ - match: { aggregations.double_range.buckets.2.key: "150.0-*" }
+ - match: { aggregations.double_range.buckets.2.from: 150.0 }
+ - is_false: aggregations.double_range.buckets.2.to
+ - match: { aggregations.double_range.buckets.2.doc_count: 0 }
+
+ - match: { profile.shards.0.aggregations.0.debug.optimized_segments: 1 }
+ - match: { profile.shards.0.aggregations.0.debug.unoptimized_segments: 0 }
+ - match: { profile.shards.0.aggregations.0.debug.leaf_visited: 1 }
+ - match: { profile.shards.0.aggregations.0.debug.inner_visited: 0 }
+
+---
+"Scaled Float Range Aggregation":
+ - do:
+ index:
+ index: test
+ id: 1
+ body: { "scaled_field": 1 }
+
+ - do:
+ index:
+ index: test
+ id: 2
+ body: { "scaled_field": 1.53 }
+
+ - do:
+ index:
+ index: test
+ id: 3
+ body: { "scaled_field": -2.1 }
+
+ - do:
+ index:
+ index: test
+ id: 4
+ body: { "scaled_field": 1.53 }
+
+ - do:
+ indices.refresh: { }
+
+ - do:
+ search:
+ index: test
+ body:
+ size: 0
+ aggs:
+ my_range:
+ range:
+ field: scaled_field
+ ranges:
+ - to: 0
+ - from: 0
+ to: 1
+ - from: 1
+ to: 1.5
+ - from: 1.5
+
+ - length: { aggregations.my_range.buckets: 4 }
+
+ - match: { aggregations.my_range.buckets.0.key: "*-0.0" }
+ - is_false: aggregations.my_range.buckets.0.from
+ - match: { aggregations.my_range.buckets.0.to: 0.0 }
+ - match: { aggregations.my_range.buckets.0.doc_count: 1 }
+ - match: { aggregations.my_range.buckets.1.key: "0.0-1.0" }
+ - match: { aggregations.my_range.buckets.1.from: 0.0 }
+ - match: { aggregations.my_range.buckets.1.to: 1.0 }
+ - match: { aggregations.my_range.buckets.1.doc_count: 0 }
+ - match: { aggregations.my_range.buckets.2.key: "1.0-1.5" }
+ - match: { aggregations.my_range.buckets.2.from: 1.0 }
+ - match: { aggregations.my_range.buckets.2.to: 1.5 }
+ - match: { aggregations.my_range.buckets.2.doc_count: 1 }
+ - match: { aggregations.my_range.buckets.3.key: "1.5-*" }
+ - match: { aggregations.my_range.buckets.3.from: 1.5 }
+ - is_false: aggregations.my_range.buckets.3.to
+ - match: { aggregations.my_range.buckets.3.doc_count: 2 }
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
new file mode 100644
index 0000000000000..05b6b2e5ed712
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml
@@ -0,0 +1,229 @@
+setup:
+ - skip:
+ version: " - 2.99.99"
+ reason: "Added in 2.15, but need to skip pre-3.0 before backport"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ properties:
+ my_field:
+ type: wildcard
+ fields:
+ lower:
+ type: wildcard
+ normalizer: lowercase
+ doc_values:
+ type: wildcard
+ doc_values: true
+
+ - do:
+ index:
+ index: test
+ id: 1
+ body:
+ my_field: "org.opensearch.transport.NodeDisconnectedException: [node_s0][127.0.0.1:39953][disconnected] disconnected"
+ - do:
+ index:
+ index: test
+ id: 2
+ body:
+ my_field: "[2024-06-08T06:31:37,443][INFO ][o.o.c.c.Coordinator ] [node_s2] cluster-manager node [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}] failed, restarting discovery"
+
+ - do:
+ index:
+ index: test
+ id: 3
+ body:
+ my_field: "[2024-06-08T06:31:37,451][INFO ][o.o.c.s.ClusterApplierService] [node_s2] cluster-manager node changed {previous [{node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true}], current []}, term: 1, version: 24, reason: becoming candidate: onLeaderFailure"
+ - do:
+ index:
+ index: test
+ id: 4
+ body:
+ my_field: "[2024-06-08T06:31:37,452][WARN ][o.o.c.NodeConnectionsService] [node_s1] failed to connect to {node_s0}{Nj7FjR7hRP2lh_zur8KN_g}{OTGOoWmmSsWP_RQ3tIKJ9g}{127.0.0.1}{127.0.0.1:39953}{imr}{shard_indexing_pressure_enabled=true} (tried [1] times)"
+ - do:
+ index:
+ index: test
+ id: 5
+ body:
+ my_field: "AbCd"
+ - do:
+ index:
+ index: test
+ id: 6
+ body:
+ other_field: "test"
+ - do:
+ indices.refresh: {}
+
+---
+"term query matches exact value":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field: "AbCd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.doc_values: "AbCd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+---
+"term query matches lowercase-normalized value":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.lower: "abcd"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field.lower: "ABCD"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "5" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ term:
+ my_field: "abcd"
+ - match: { hits.total.value: 0 }
+
+---
+"wildcard query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*Node*Exception*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+---
+"wildcard query matches lowercase-normalized field":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field.lower:
+ value: "*node*exception*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field.lower:
+ value: "*NODE*EXCEPTION*"
+ - match: { hits.total.value: 1 }
+ - match: { hits.hits.0._id: "1" }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*node*exception*"
+ - match: { hits.total.value: 0 }
+
+---
+"prefix query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ prefix:
+ my_field:
+ value: "[2024-06-08T"
+ - match: { hits.total.value: 3 }
+
+---
+"regexp query matches":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*06-08.*cluster-manager node.*"
+ - match: { hits.total.value: 2 }
+
+---
+"regexp query matches lowercase-normalized field":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field.lower:
+ value: ".*06-08.*Cluster-Manager Node.*"
+ - match: { hits.total.value: 2 }
+
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*06-08.*Cluster-Manager Node.*"
+ - match: { hits.total.value: 0 }
+
+---
+"wildcard match-all works":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ wildcard:
+ my_field:
+ value: "*"
+ - match: { hits.total.value: 5 }
+---
+"regexp match-all works":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ regexp:
+ my_field:
+ value: ".*"
+ - match: { hits.total.value: 5 }
diff --git a/server/build.gradle b/server/build.gradle
index 624e5fe332662..429af5d0ac258 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -356,14 +356,18 @@ tasks.named("thirdPartyAudit").configure {
}
tasks.named("dependencyLicenses").configure {
+ mapping from: /jackson-.*/, to: 'jackson'
mapping from: /reactor-.*/, to: 'reactor'
mapping from: /lucene-.*/, to: 'lucene'
- dependencies = project.configurations.runtimeClasspath.fileCollection {
- it.group.startsWith('org.opensearch') == false ||
- // keep the following org.opensearch jars in
- (it.name == 'jna' ||
- it.name == 'securesm')
- }
+ dependencies = project.configurations.runtimeClasspath.incoming.artifactView {
+ componentFilter {
+ it instanceof ModuleComponentIdentifier &&
+ (it.group.startsWith('org.opensearch') == false ||
+ // keep the following org.opensearch jars in
+ (it.name == 'jna' ||
+ it.name == 'securesm'))
+ }
+ }.files
}
tasks.named("filepermissions").configure {
@@ -405,6 +409,7 @@ tasks.register("japicmp", me.champeau.gradle.japicmp.JapicmpTask) {
failOnModification = true
ignoreMissingClasses = true
annotationIncludes = ['@org.opensearch.common.annotation.PublicApi', '@org.opensearch.common.annotation.DeprecatedApi']
+ annotationExcludes = ['@org.opensearch.common.annotation.InternalApi']
txtOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.txt")
htmlOutputFile = layout.buildDirectory.file("reports/java-compatibility/report.html")
dependsOn downloadJapicmpCompareTarget
diff --git a/server/licenses/jackson-LICENSE b/server/licenses/jackson-LICENSE
new file mode 100644
index 0000000000000..f5f45d26a49d6
--- /dev/null
+++ b/server/licenses/jackson-LICENSE
@@ -0,0 +1,8 @@
+This copy of Jackson JSON processor streaming parser/generator is licensed under the
+Apache (Software) License, version 2.0 ("the License").
+See the License for details about distribution rights, and the
+specific rights regarding derivate works.
+
+You may obtain a copy of the License at:
+
+http://www.apache.org/licenses/LICENSE-2.0
diff --git a/server/licenses/jackson-NOTICE b/server/licenses/jackson-NOTICE
new file mode 100644
index 0000000000000..4c976b7b4cc58
--- /dev/null
+++ b/server/licenses/jackson-NOTICE
@@ -0,0 +1,20 @@
+# Jackson JSON processor
+
+Jackson is a high-performance, Free/Open Source JSON processing library.
+It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has
+been in development since 2007.
+It is currently developed by a community of developers, as well as supported
+commercially by FasterXML.com.
+
+## Licensing
+
+Jackson core and extension components may be licensed under different licenses.
+To find the details that apply to this artifact see the accompanying LICENSE file.
+For more information, including possible other licensing options, contact
+FasterXML.com (http://fasterxml.com).
+
+## Credits
+
+A list of contributors may be found from CREDITS file, which is included
+in some artifacts (usually source distributions); but is always available
+from the source code management (SCM) system project uses.
diff --git a/server/licenses/jackson-core-2.17.1.jar.sha1 b/server/licenses/jackson-core-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..82dab5981e652
--- /dev/null
+++ b/server/licenses/jackson-core-2.17.1.jar.sha1
@@ -0,0 +1 @@
+5e52a11644cd59a28ef79f02bddc2cc3bab45edb
\ No newline at end of file
diff --git a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..ff42ed1f92cfe
--- /dev/null
+++ b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1
@@ -0,0 +1 @@
+ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71
\ No newline at end of file
diff --git a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..47d19067cf2a6
--- /dev/null
+++ b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1
@@ -0,0 +1 @@
+89683ac4f0a0c2c4f69ea56b90480ed40266dac8
\ No newline at end of file
diff --git a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
new file mode 100644
index 0000000000000..7946e994c7104
--- /dev/null
+++ b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1
@@ -0,0 +1 @@
+b4c7b8a9ea3f398116a75c146b982b22afebc4ee
\ No newline at end of file
diff --git a/server/licenses/jopt-simple-5.0.4.jar.sha1 b/server/licenses/jopt-simple-5.0.4.jar.sha1
new file mode 100644
index 0000000000000..7ade81efe4d0d
--- /dev/null
+++ b/server/licenses/jopt-simple-5.0.4.jar.sha1
@@ -0,0 +1 @@
+4fdac2fbe92dfad86aa6e9301736f6b4342a3f5c
\ No newline at end of file
diff --git a/server/licenses/jopt-simple-LICENSE.txt b/server/licenses/jopt-simple-LICENSE.txt
new file mode 100644
index 0000000000000..85f923a95268a
--- /dev/null
+++ b/server/licenses/jopt-simple-LICENSE.txt
@@ -0,0 +1,24 @@
+/*
+ The MIT License
+
+ Copyright (c) 2004-2015 Paul R. Holser, Jr.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
diff --git a/server/licenses/jopt-simple-NOTICE.txt b/server/licenses/jopt-simple-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/server/licenses/lucene-analysis-common-9.11.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.11.0.jar.sha1
deleted file mode 100644
index 7139f6a43a15a..0000000000000
--- a/server/licenses/lucene-analysis-common-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-75a0a333cf1e043102743066c929e65fe51cbcda
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..f1249066d10f2
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+7e282aab7388efc911348f1eacd90e661580dda7
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1
deleted file mode 100644
index 735e80b60b001..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-db385446bc3fd70e7c6a744276c0a157bd60ee0a
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..ac50c5e110a72
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+69e59ba4bed4c58836d2727d72b7f0095d2dcb92
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.11.0.jar.sha1 b/server/licenses/lucene-core-9.11.0.jar.sha1
deleted file mode 100644
index b0d38c4165581..0000000000000
--- a/server/licenses/lucene-core-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2e487755a6814b2a1bc770c26569dcba86873dcf
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..e3fd1708ea428
--- /dev/null
+++ b/server/licenses/lucene-core-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+51ff4940eb1024184bbaa5dae39695d2392c5bab
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.11.0.jar.sha1 b/server/licenses/lucene-grouping-9.11.0.jar.sha1
deleted file mode 100644
index 562de95605b60..0000000000000
--- a/server/licenses/lucene-grouping-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-882bdaf209b0acb332aa34836616424bcbecf462
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..cc5bf5bfd8ec0
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+5847a7d47f13ecb7f039fb9adf6f3b8e4bddde77
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.11.0.jar.sha1 b/server/licenses/lucene-highlighter-9.11.0.jar.sha1
deleted file mode 100644
index e0ef36d321c9d..0000000000000
--- a/server/licenses/lucene-highlighter-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-44accdc03c5482e602718f7bf91e5940ba4e4870
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..eb14059d2cd8c
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+7cc0a26777a479f06fbcfae7abc23e784e1a00dc
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.11.0.jar.sha1 b/server/licenses/lucene-join-9.11.0.jar.sha1
deleted file mode 100644
index 34c618ccfbcc7..0000000000000
--- a/server/licenses/lucene-join-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-32a30ee03ed4f3e43bf63250270b2d4d53050045
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..b87170c39c78c
--- /dev/null
+++ b/server/licenses/lucene-join-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+9cd99401c826d910da3c2beab8e42f1af8be6ea4
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.11.0.jar.sha1 b/server/licenses/lucene-memory-9.11.0.jar.sha1
deleted file mode 100644
index d730cfb4b7660..0000000000000
--- a/server/licenses/lucene-memory-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b3e80aa6aa3299118e76a23edc23b58f3ba5a515
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..de591dd659cb5
--- /dev/null
+++ b/server/licenses/lucene-memory-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+cfee136ecbc3df7adc38b38e020dca5e61c22773
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.11.0.jar.sha1 b/server/licenses/lucene-misc-9.11.0.jar.sha1
deleted file mode 100644
index 9be27f004435b..0000000000000
--- a/server/licenses/lucene-misc-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-54fe308908194e1b0697a1157a45c5998c9e1083
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..1a999bb9c6686
--- /dev/null
+++ b/server/licenses/lucene-misc-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+afbc5adf93d4eb1a1b109ad828d1968bf16ef292
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.11.0.jar.sha1 b/server/licenses/lucene-queries-9.11.0.jar.sha1
deleted file mode 100644
index b445610c25858..0000000000000
--- a/server/licenses/lucene-queries-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-987d1286949ddf514b8405fd453ed47bebdfb12d
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..783a26551ae8c
--- /dev/null
+++ b/server/licenses/lucene-queries-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+16907c36f6adb8ba8f260e05738c66afb37c72d3
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.11.0.jar.sha1 b/server/licenses/lucene-queryparser-9.11.0.jar.sha1
deleted file mode 100644
index a1620ba9c7708..0000000000000
--- a/server/licenses/lucene-queryparser-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-e97fe1c0d102edb8d6e1c01454992fd2b8d80ae0
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..b3e9e4de96174
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+72baa9bddcf2efb71ffb695f1e9f548699ec13a0
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.11.0.jar.sha1 b/server/licenses/lucene-sandbox-9.11.0.jar.sha1
deleted file mode 100644
index 0dc193f054973..0000000000000
--- a/server/licenses/lucene-sandbox-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e46b790744bd9118ccc053f70235364213312a5
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..2aefa435b1e9a
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+dd3c63066f583d90b563ebaa6fbe61c603403acb
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1
deleted file mode 100644
index 9d3a8d2857db6..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-079ca5aaf544a3acde84b8b88423ace6dedc23eb
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..d27112c6db6ab
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+69b99530e0b05251c12863bee6a9325cafd5fdaa
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.11.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.11.0.jar.sha1
deleted file mode 100644
index fd5ff875a0113..0000000000000
--- a/server/licenses/lucene-spatial3d-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-564558818d70fc384db5b36fbc8a0ab27b107609
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..29423ac0ababd
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+a67d193b4b08790169db7cf005a2429991260287
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.11.0.jar.sha1 b/server/licenses/lucene-suggest-9.11.0.jar.sha1
deleted file mode 100644
index 2fa96e97f307a..0000000000000
--- a/server/licenses/lucene-suggest-9.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-aa345db9b6caaf881e7890ea5b8911357d592167
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1 b/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1
new file mode 100644
index 0000000000000..6ce1f639ccbb7
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.12.0-snapshot-847316d.jar.sha1
@@ -0,0 +1 @@
+7a1625ae39071ccbfb3af11df5a74291758f4b47
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.17.jar.sha1 b/server/licenses/reactor-core-3.5.17.jar.sha1
deleted file mode 100644
index 6663356bab047..0000000000000
--- a/server/licenses/reactor-core-3.5.17.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2cf9b080e3a2d8a5a39948260db5fd1dae54c3ac
\ No newline at end of file
diff --git a/server/licenses/reactor-core-3.5.18.jar.sha1 b/server/licenses/reactor-core-3.5.18.jar.sha1
new file mode 100644
index 0000000000000..c503f768beafa
--- /dev/null
+++ b/server/licenses/reactor-core-3.5.18.jar.sha1
@@ -0,0 +1 @@
+3a8157f7d66d71a407eb77ba12bce72a38c5b4da
\ No newline at end of file
diff --git a/server/licenses/snakeyaml-2.1.jar.sha1 b/server/licenses/snakeyaml-2.1.jar.sha1
new file mode 100644
index 0000000000000..5586b210a9736
--- /dev/null
+++ b/server/licenses/snakeyaml-2.1.jar.sha1
@@ -0,0 +1 @@
+c79f47315517560b5bd6a62376ee385e48105437
\ No newline at end of file
diff --git a/server/licenses/snakeyaml-LICENSE.txt b/server/licenses/snakeyaml-LICENSE.txt
new file mode 100644
index 0000000000000..d9a10c0d8e868
--- /dev/null
+++ b/server/licenses/snakeyaml-LICENSE.txt
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/server/licenses/snakeyaml-NOTICE.txt b/server/licenses/snakeyaml-NOTICE.txt
new file mode 100644
index 0000000000000..b51464eee1f00
--- /dev/null
+++ b/server/licenses/snakeyaml-NOTICE.txt
@@ -0,0 +1,24 @@
+***The art of simplicity is a puzzle of complexity.***
+
+## Overview ##
+[YAML](http://yaml.org) is a data serialization format designed for human readability and interaction with scripting languages.
+
+SnakeYAML is a YAML processor for the Java Virtual Machine.
+
+## SnakeYAML features ##
+
+* a **complete** [YAML 1.1 processor](http://yaml.org/spec/1.1/current.html). In particular, SnakeYAML can parse all examples from the specification.
+* Unicode support including UTF-8/UTF-16 input/output.
+* high-level API for serializing and deserializing native Java objects.
+* support for all types from the [YAML types repository](http://yaml.org/type/index.html).
+* relatively sensible error messages.
+
+## Info ##
+ * [Changes](https://bitbucket.org/asomov/snakeyaml/wiki/Changes)
+ * [Documentation](https://bitbucket.org/asomov/snakeyaml/wiki/Documentation)
+
+## Contribute ##
+* Mercurial DVCS is used to dance with the [source code](https://bitbucket.org/asomov/snakeyaml/src).
+* If you find a bug in SnakeYAML, please [file a bug report](https://bitbucket.org/asomov/snakeyaml/issues?status=new&status=open).
+* You may discuss SnakeYAML at
+[the mailing list](http://groups.google.com/group/snakeyaml-core).
\ No newline at end of file
diff --git a/server/licenses/zstd-jni-1.5.5-5.jar.sha1 b/server/licenses/zstd-jni-1.5.5-5.jar.sha1
new file mode 100644
index 0000000000000..498c60c34e3da
--- /dev/null
+++ b/server/licenses/zstd-jni-1.5.5-5.jar.sha1
@@ -0,0 +1 @@
+74ffdc5f140080adacf5278287aadd950179f848
\ No newline at end of file
diff --git a/server/licenses/zstd-jni-LICENSE.txt b/server/licenses/zstd-jni-LICENSE.txt
new file mode 100644
index 0000000000000..c4dd507c1c72f
--- /dev/null
+++ b/server/licenses/zstd-jni-LICENSE.txt
@@ -0,0 +1,29 @@
+-----------------------------------------------------------------------------
+** Beginning of "BSD License" text. **
+
+Zstd-jni: JNI bindings to Zstd Library
+
+Copyright (c) 2015-present, Luben Karavelov/ All rights reserved.
+
+BSD License
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/server/licenses/zstd-jni-NOTICE.txt b/server/licenses/zstd-jni-NOTICE.txt
new file mode 100644
index 0000000000000..389c97cbc892d
--- /dev/null
+++ b/server/licenses/zstd-jni-NOTICE.txt
@@ -0,0 +1 @@
+The code for the JNI bindings to Zstd library was originally authored by Luben Karavelov
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
index a1122f279c7e4..acbd68fff6dd0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
@@ -42,24 +42,32 @@
import org.opensearch.Version;
import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.opensearch.action.admin.indices.shrink.ResizeType;
import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
import org.opensearch.client.Requests;
+import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.common.unit.ByteSizeValue;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.index.query.TermsQueryBuilder;
import org.opensearch.indices.recovery.RecoverySettings;
import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase;
+import org.opensearch.repositories.RepositoriesService;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.VersionUtils;
+import org.junit.Before;
import java.util.concurrent.ExecutionException;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteCloneIndexIT extends RemoteStoreBaseIntegTestCase {
@@ -69,6 +77,11 @@ protected boolean forbidPrivateIndexSettings() {
return false;
}
+ @Before
+ public void setup() {
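+ // Use the mock filesystem repository with asynchronous uploads for the tests in this class; individual tests may opt back out.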
+ asyncUploadMockFsRepo = true;
+ }
+
public void testCreateCloneIndex() {
Version version = VersionUtils.randomIndexCompatibleVersion(random());
int numPrimaryShards = randomIntBetween(1, 5);
@@ -140,6 +153,79 @@ public void testCreateCloneIndex() {
}
+ public void testCreateCloneIndexLowPriorityRateLimit() {
+ Version version = VersionUtils.randomIndexCompatibleVersion(random());
+ int numPrimaryShards = 1;
+ prepareCreate("source").setSettings(
+ Settings.builder().put(indexSettings()).put("number_of_shards", numPrimaryShards).put("index.version.created", version)
+ ).get();
+ final int docs = randomIntBetween(0, 128);
+ for (int i = 0; i < docs; i++) {
+ client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", MediaTypeRegistry.JSON).get();
+ }
+ ByteSizeValue shardSize = client().admin().indices().prepareStats("source").execute().actionGet().getShards()[0].getStats()
+ .getStore()
+ .size();
+ logger.info("Shard size is {}", shardSize);
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ // ensure all shards are allocated, otherwise the ensure green below might not succeed;
+ // if we change the write block too quickly we can end up with an unassigned replica that cannot be assigned anymore
+ ensureGreen();
+ // block writes on the source index so that it can be cloned
+ client().admin().indices().prepareUpdateSettings("source").setSettings(Settings.builder().put("index.blocks.write", true)).get();
+ ensureGreen();
+
+ // disable rebalancing to be able to capture the right stats. Balancing can move the target primary,
+ // making it hard to pinpoint the source shards.
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"))
+ .get();
+ try {
+ // apply rate limiter
+ setLowPriorityUploadRate(REPOSITORY_NAME, "1kb");
+ assertAcked(
+ client().admin()
+ .indices()
+ .prepareResizeIndex("source", "target")
+ .setResizeType(ResizeType.CLONE)
+ .setSettings(Settings.builder().put("index.number_of_replicas", 0).putNull("index.blocks.write").build())
+ .get()
+ );
+ ensureGreen();
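+ // Sum the low-priority remote upload throttle time across all data nodes; with the 1kb rate limit the clone's uploads must have been paused.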
+ long uploadPauseTime = 0L;
+ for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
+ uploadPauseTime += repositoriesService.repository(REPOSITORY_NAME).getLowPriorityRemoteUploadThrottleTimeInNanos();
+ }
+ assertThat(uploadPauseTime, greaterThan(TimeValue.timeValueSeconds(randomIntBetween(5, 10)).nanos()));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ } finally {
+ // clean up
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(
+ Settings.builder()
+ .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String) null)
+ .put(RecoverySettings.INDICES_INTERNAL_REMOTE_UPLOAD_TIMEOUT.getKey(), (String) null)
+ )
+ .get();
+ }
+ }
+
+ protected void setLowPriorityUploadRate(String repoName, String value) throws ExecutionException, InterruptedException {
+ GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName });
+ GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get();
+ RepositoryMetadata rmd = res.repositories().get(0);
+ Settings.Builder settings = Settings.builder()
+ .put("location", rmd.settings().get("location"))
+ .put("max_remote_low_priority_upload_bytes_per_sec", value);
+ assertAcked(client().admin().cluster().preparePutRepository(repoName).setType(rmd.type()).setSettings(settings).get());
+ }
+
public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException {
asyncUploadMockFsRepo = false;
Version version = VersionUtils.randomIndexCompatibleVersion(random());
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java
index dbcb030d8a4f7..f4b5f112f5785 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/ClusterRerouteIT.java
@@ -273,7 +273,8 @@ public void testDelayWithALargeAmountOfShards() throws Exception {
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_1));
// This might run slowly on older hardware
- ensureGreen(TimeValue.timeValueMinutes(2));
+ // In some cases the shards are rebalanced back and forth; this appears to be a very low-probability bug.
+ ensureGreen(TimeValue.timeValueMinutes(2), false);
}
private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java
index b33d57ed43189..beed6e6846b46 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java
@@ -539,18 +539,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana
assertEquals(originalClusterManager, currentClusterManager);
}
- // Will wait for all events to complete
- client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
-
- // Recommissioning the zone back to gracefully succeed the test once above tests succeeds
- DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(currentClusterManager).execute(
- DeleteDecommissionStateAction.INSTANCE,
- new DeleteDecommissionStateRequest()
- ).get();
- assertTrue(deleteDecommissionStateResponse.isAcknowledged());
-
- // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster
- ensureStableCluster(15, TimeValue.timeValueMinutes(2));
+ deleteDecommissionStateAndWaitForStableCluster(currentClusterManager, 15);
}
public void testDecommissionFailedWhenDifferentAttributeAlreadyDecommissioned() throws Exception {
@@ -617,18 +606,7 @@ public void testDecommissionFailedWhenDifferentAttributeAlreadyDecommissioned()
)
);
- // Will wait for all events to complete
- client(node_in_c).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
-
- // Recommissioning the zone back to gracefully succeed the test once above tests succeeds
- DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(node_in_c).execute(
- DeleteDecommissionStateAction.INSTANCE,
- new DeleteDecommissionStateRequest()
- ).get();
- assertTrue(deleteDecommissionStateResponse.isAcknowledged());
-
- // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster
- ensureStableCluster(6, TimeValue.timeValueMinutes(2));
+ deleteDecommissionStateAndWaitForStableCluster(node_in_c, 6);
}
public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException {
@@ -748,20 +726,7 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx
);
logger.info("--> Verified the decommissioned node has in_progress state.");
- // Will wait for all events to complete
- client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
- logger.info("--> Got LANGUID event");
- // Recommissioning the zone back to gracefully succeed the test once above tests succeeds
- DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNode).execute(
- DeleteDecommissionStateAction.INSTANCE,
- new DeleteDecommissionStateRequest()
- ).get();
- assertTrue(deleteDecommissionStateResponse.isAcknowledged());
- logger.info("--> Deleting decommission done.");
-
- // will wait for cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes)
- // as by then all nodes should have joined the cluster
- ensureStableCluster(6, TimeValue.timeValueSeconds(121));
+ deleteDecommissionStateAndWaitForStableCluster(activeNode, 6);
}
public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception {
@@ -983,15 +948,7 @@ public void testDecommissionAcknowledgedIfWeightsNotSetForNonRoutingNode() throw
assertEquals(clusterState.nodes().getDataNodes().size(), 3);
assertEquals(clusterState.nodes().getClusterManagerNodes().size(), 2);
- // Recommissioning the zone back to gracefully succeed the test once above tests succeeds
- DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(dataNodes.get(0)).execute(
- DeleteDecommissionStateAction.INSTANCE,
- new DeleteDecommissionStateRequest()
- ).get();
- assertTrue(deleteDecommissionStateResponse.isAcknowledged());
-
- // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster
- ensureStableCluster(6, TimeValue.timeValueMinutes(2));
+ deleteDecommissionStateAndWaitForStableCluster(dataNodes.get(0), 6);
}
public void testConcurrentDecommissionAction() throws Exception {
@@ -1019,7 +976,7 @@ public void testConcurrentDecommissionAction() throws Exception {
.build()
);
logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'");
- internalCluster().startNodes(
+ final String bZoneDataNode = internalCluster().startNodes(
Settings.builder()
.put(commonSettings)
.put("node.attr.zone", "a")
@@ -1035,7 +992,7 @@ public void testConcurrentDecommissionAction() throws Exception {
.put("node.attr.zone", "c")
.put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE))
.build()
- );
+ ).get(1);
ensureStableCluster(6);
ClusterHealthResponse health = client().admin()
@@ -1100,6 +1057,25 @@ public void testConcurrentDecommissionAction() throws Exception {
assertEquals(concurrentRuns, numRequestAcknowledged.get() + numRequestUnAcknowledged.get() + numRequestFailed.get());
assertEquals(concurrentRuns - 1, numRequestFailed.get());
assertEquals(1, numRequestAcknowledged.get() + numRequestUnAcknowledged.get());
+
+ deleteDecommissionStateAndWaitForStableCluster(bZoneDataNode, 6);
+ }
+
+ private void deleteDecommissionStateAndWaitForStableCluster(String activeNodeName, int expectedClusterSize) throws ExecutionException,
+ InterruptedException {
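+ // Wait for all pending cluster events to finish before recommissioning.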
+ client(activeNodeName).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
+
+ // Recommission the zone so that the test completes gracefully once the above assertions succeed
+ DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNodeName).execute(
+ DeleteDecommissionStateAction.INSTANCE,
+ new DeleteDecommissionStateRequest()
+ ).get();
+ assertTrue(deleteDecommissionStateResponse.isAcknowledged());
+ logger.info("--> Deleting decommission done.");
+
+ // wait for the cluster to stabilise with a timeout of 2 min (findPeerInterval for decommissioned nodes),
+ // as by then all nodes should have joined the cluster
+ ensureStableCluster(expectedClusterSize, TimeValue.timeValueSeconds(121));
}
private static class WaitForFailedDecommissionState implements ClusterStateObserver.Listener {
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
index bc0557ddc2afa..6296608c64d37 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
@@ -55,7 +55,9 @@
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.routing.ShardRouting;
+import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.cluster.routing.UnassignedInfo;
+import org.opensearch.cluster.routing.allocation.AllocationDecision;
import org.opensearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.settings.Settings;
@@ -98,6 +100,9 @@
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BooleanSupplier;
+import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static java.util.Collections.emptyMap;
@@ -105,8 +110,10 @@
import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING;
import static org.opensearch.cluster.health.ClusterHealthStatus.GREEN;
import static org.opensearch.cluster.health.ClusterHealthStatus.RED;
+import static org.opensearch.cluster.health.ClusterHealthStatus.YELLOW;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.opensearch.gateway.GatewayRecoveryTestUtils.corruptShard;
import static org.opensearch.gateway.GatewayRecoveryTestUtils.getDiscoveryNodes;
@@ -753,6 +760,7 @@ public void testMessyElectionsStillMakeClusterGoGreen() throws Exception {
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
.build()
);
@@ -843,6 +851,80 @@ public void testBatchModeDisabled() throws Exception {
ensureGreen("test");
}
+ public void testMultipleReplicaShardAssignmentWithDelayedAllocationAndDifferentNodeStartTimeInBatchMode() throws Exception {
+ internalCluster().startClusterManagerOnlyNodes(
+ 1,
+ Settings.builder().put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_BATCH_MODE.getKey(), true).build()
+ );
+ internalCluster().startDataOnlyNodes(6);
+ createIndex(
+ "test",
+ Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 3)
+ .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "60m")
+ .build()
+ );
+ ensureGreen("test");
+
+ List<String> nodesWithReplicaShards = findNodesWithShard(false);
+ Settings replicaNode0DataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(0));
+ Settings replicaNode1DataPathSettings = internalCluster().dataPathSettings(nodesWithReplicaShards.get(1));
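+ // Stop two nodes that hold replica copies, keeping their data path settings so they can be restarted later.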
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(0)));
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesWithReplicaShards.get(1)));
+
+ ensureStableCluster(5);
+
+ logger.info("--> explicitly triggering reroute");
+ ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+ assertTrue(clusterRerouteResponse.isAcknowledged());
+
+ ClusterHealthResponse health = client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet();
+ assertFalse(health.isTimedOut());
+ assertEquals(YELLOW, health.getStatus());
+ assertEquals(2, health.getUnassignedShards());
+ // the shards should remain unassigned because of ALLOCATION_DELAYED
+ BooleanSupplier delayedShardAllocationStatusVerificationSupplier = () -> AllocationDecision.ALLOCATION_DELAYED.equals(
+ client().admin()
+ .cluster()
+ .prepareAllocationExplain()
+ .setIndex("test")
+ .setShard(0)
+ .setPrimary(false)
+ .get()
+ .getExplanation()
+ .getShardAllocationDecision()
+ .getAllocateDecision()
+ .getAllocationDecision()
+ );
+ waitUntil(delayedShardAllocationStatusVerificationSupplier, 2, TimeUnit.MINUTES);
+
+ logger.info("--> restarting the node 1");
+ internalCluster().startDataOnlyNode(
+ Settings.builder().put("node.name", nodesWithReplicaShards.get(0)).put(replicaNode0DataPathSettings).build()
+ );
+ clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
+ assertTrue(clusterRerouteResponse.isAcknowledged());
+ ensureStableCluster(6);
+ waitUntil(
+ () -> client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet().getActiveShards() == 3,
+ 2,
+ TimeUnit.MINUTES
+ );
+ health = client().admin().cluster().health(Requests.clusterHealthRequest().timeout("5m")).actionGet();
+ assertFalse(health.isTimedOut());
+ assertEquals(YELLOW, health.getStatus());
+ assertEquals(1, health.getUnassignedShards());
+ assertEquals(1, health.getDelayedUnassignedShards());
+ waitUntil(delayedShardAllocationStatusVerificationSupplier, 2, TimeUnit.MINUTES);
+ logger.info("--> restarting the node 0");
+ internalCluster().startDataOnlyNode(
+ Settings.builder().put("node.name", nodesWithReplicaShards.get(1)).put(replicaNode1DataPathSettings).build()
+ );
+ ensureStableCluster(7);
+ ensureGreen("test");
+ }
+
public void testNBatchesCreationAndAssignment() throws Exception {
// we will reduce batch size to 5 to make sure we have enough batches to test assignment
// Total number of primary shards = 50 (50 indices*1)
@@ -1293,4 +1375,14 @@ private void prepareIndex(String indexName, int numberOfPrimaryShards) {
index(indexName, "type", "1", Collections.emptyMap());
flush(indexName);
}
+
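+ // Returns the names of the nodes that currently host started primary (primary == true) or replica (primary == false) shards, in random order.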
+ private List<String> findNodesWithShard(final boolean primary) {
+ ClusterState state = client().admin().cluster().prepareState().get().getState();
+ List<ShardRouting> startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED);
+ List<ShardRouting> requiredStartedShards = startedShards.stream()
+ .filter(startedShard -> startedShard.primary() == primary)
+ .collect(Collectors.toList());
+ Collections.shuffle(requiredStartedShards, random());
+ return requiredStartedShards.stream().map(shard -> state.nodes().get(shard.currentNodeId()).getName()).collect(Collectors.toList());
+ }
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
index e96dedaa3e6a0..5074971ab1a1f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateCleanupManagerIT.java
@@ -108,6 +108,9 @@ public void testRemoteCleanupDeleteStale() throws Exception {
.add("cluster-state")
.add(getClusterState().metadata().clusterUUID());
BlobPath manifestContainerPath = baseMetadataPath.add("manifest");
+ RemoteClusterStateCleanupManager remoteClusterStateCleanupManager = internalCluster().getClusterManagerNodeInstance(
+ RemoteClusterStateCleanupManager.class
+ );
// set cleanup interval to 100 ms to make the test faster
ClusterUpdateSettingsResponse response = client().admin()
@@ -117,6 +120,7 @@ public void testRemoteCleanupDeleteStale() throws Exception {
.get();
assertTrue(response.isAcknowledged());
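+ // Wait for the cleanup manager to pick up the new 100ms interval before asserting on stale-file deletion.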
+ assertBusy(() -> assertEquals(100, remoteClusterStateCleanupManager.getStaleFileDeletionTask().getInterval().getMillis()));
assertBusy(() -> {
int manifestFiles = repository.blobStore().blobContainer(manifestContainerPath).listBlobsByPrefix("manifest").size();
@@ -128,7 +132,7 @@ public void testRemoteCleanupDeleteStale() throws Exception {
"Current number of manifest files: " + manifestFiles,
manifestFiles >= RETAINED_MANIFESTS && manifestFiles < RETAINED_MANIFESTS + 2 * SKIP_CLEANUP_STATE_CHANGES
);
- }, 500, TimeUnit.MILLISECONDS);
+ });
// disable the clean up to avoid race condition during shutdown
response = client().admin()
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
index ab2f0f0080566..f6c7355ea06f6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteClusterStateServiceIT.java
@@ -26,13 +26,13 @@
import java.util.function.Function;
import java.util.stream.Collectors;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.COORDINATION_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.CUSTOM_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.DELIMITER;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.METADATA_FILE_PREFIX;
import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.SETTING_METADATA;
-import static org.opensearch.gateway.remote.RemoteClusterStateService.TEMPLATES_METADATA;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.METADATA_FILE_PREFIX;
+import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA;
+import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA;
+import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA;
+import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA;
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
public class RemoteClusterStateServiceIT extends RemoteStoreBaseIntegTestCase {
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
new file mode 100644
index 0000000000000..8e5193b650868
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -0,0 +1,440 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.mapper;
+
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.common.Rounding;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.core.index.Index;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.compositeindex.CompositeIndexSettings;
+import org.opensearch.index.compositeindex.datacube.DateDimension;
+import org.opensearch.index.compositeindex.datacube.MetricStat;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration;
+import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+
+import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+
+/**
+ * Integration tests for star tree mapper
+ */
+public class StarTreeMapperIT extends OpenSearchIntegTestCase {
+ private static final String TEST_INDEX = "test";
+
+ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, boolean invalidMetric, boolean keywordDim) {
+ try {
+ return jsonBuilder().startObject()
+ .startObject("composite")
+ .startObject("startree-1")
+ .field("type", "star_tree")
+ .startObject("config")
+ .startArray("ordered_dimensions")
+ .startObject()
+ .field("name", "timestamp")
+ .endObject()
+ .startObject()
+ .field("name", getDim(invalidDim, keywordDim))
+ .endObject()
+ .endArray()
+ .startArray("metrics")
+ .startObject()
+ .field("name", getDim(invalidMetric, false))
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("properties")
+ .startObject("timestamp")
+ .field("type", "date")
+ .endObject()
+ .startObject("numeric_dv")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("numeric")
+ .field("type", "integer")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("keyword_dv")
+ .field("type", "keyword")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("doc_values", false)
+ .endObject()
+ .endObject()
+ .endObject();
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ private static XContentBuilder createMaxDimTestMapping() {
+ try {
+ return jsonBuilder().startObject()
+ .startObject("composite")
+ .startObject("startree-1")
+ .field("type", "star_tree")
+ .startObject("config")
+ .startArray("ordered_dimensions")
+ .startObject()
+ .field("name", "timestamp")
+ .startArray("calendar_intervals")
+ .value("day")
+ .value("month")
+ .endArray()
+ .endObject()
+ .startObject()
+ .field("name", "dim2")
+ .endObject()
+ .startObject()
+ .field("name", "dim3")
+ .endObject()
+ .endArray()
+ .startArray("metrics")
+ .startObject()
+ .field("name", "dim2")
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("properties")
+ .startObject("timestamp")
+ .field("type", "date")
+ .endObject()
+ .startObject("dim2")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("dim3")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .endObject()
+ .endObject();
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ private static XContentBuilder createTestMappingWithoutStarTree(boolean invalidDim, boolean invalidMetric, boolean keywordDim) {
+ try {
+ return jsonBuilder().startObject()
+ .startObject("properties")
+ .startObject("timestamp")
+ .field("type", "date")
+ .endObject()
+ .startObject("numeric_dv")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("numeric")
+ .field("type", "integer")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("keyword_dv")
+ .field("type", "keyword")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("doc_values", false)
+ .endObject()
+ .endObject()
+ .endObject();
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ private static XContentBuilder createUpdateTestMapping(boolean changeDim, boolean sameStarTree) {
+ try {
+ return jsonBuilder().startObject()
+ .startObject("composite")
+ .startObject(sameStarTree ? "startree-1" : "startree-2")
+ .field("type", "star_tree")
+ .startObject("config")
+ .startArray("ordered_dimensions")
+ .startObject()
+ .field("name", "timestamp")
+ .endObject()
+ .startObject()
+ .field("name", changeDim ? "numeric_new" : getDim(false, false))
+ .endObject()
+ .endArray()
+ .startArray("metrics")
+ .startObject()
+ .field("name", getDim(false, false))
+ .endObject()
+ .endArray()
+ .endObject()
+ .endObject()
+ .endObject()
+ .startObject("properties")
+ .startObject("timestamp")
+ .field("type", "date")
+ .endObject()
+ .startObject("numeric_dv")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("numeric")
+ .field("type", "integer")
+ .field("doc_values", false)
+ .endObject()
+ .startObject("numeric_new")
+ .field("type", "integer")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("keyword_dv")
+ .field("type", "keyword")
+ .field("doc_values", true)
+ .endObject()
+ .startObject("keyword")
+ .field("type", "keyword")
+ .field("doc_values", false)
+ .endObject()
+ .endObject()
+ .endObject();
+ } catch (IOException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
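+ // Picks the field used as a dimension/metric: "numeric" (doc_values disabled) for the invalid case, "keyword" for the unsupported keyword case, otherwise the valid "numeric_dv" field.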
+ private static String getDim(boolean hasDocValues, boolean isKeyword) {
+ if (hasDocValues) {
+ return "numeric";
+ } else if (isKeyword) {
+ return "keyword";
+ }
+ return "numeric_dv";
+ }
+
+ @Override
+ protected Settings featureFlagSettings() {
+ return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.STAR_TREE_INDEX, "true").build();
+ }
+
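+ // Star tree support is gated behind both the feature flag above and a composite-index cluster setting, which is enabled before each test and cleared again in cleanupNodeSettings().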
+ @Before
+ public final void setupNodeSettings() {
+ Settings request = Settings.builder().put(CompositeIndexSettings.STAR_TREE_INDEX_ENABLED_SETTING.getKey(), true).build();
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get());
+ }
+
+ public void testValidCompositeIndex() {
+ prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get();
+ Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
+ for (IndicesService service : dataNodeInstances) {
+ final Index index = resolveIndex("test");
+ if (service.hasIndex(index)) {
+ IndexService indexService = service.indexService(index);
+ Set<CompositeMappedFieldType> fts = indexService.mapperService().getCompositeFieldTypes();
+
+ for (CompositeMappedFieldType ft : fts) {
+ assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType);
+ StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft;
+ assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField());
+ assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+ DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+ List<Rounding.DateTimeUnit> expectedTimeUnits = Arrays.asList(
+ Rounding.DateTimeUnit.MINUTES_OF_HOUR,
+ Rounding.DateTimeUnit.HOUR_OF_DAY
+ );
+ assertEquals(expectedTimeUnits, dateDim.getIntervals());
+ assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField());
+ assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField());
+ List<MetricStat> expectedMetrics = Arrays.asList(
+ MetricStat.AVG,
+ MetricStat.COUNT,
+ MetricStat.SUM,
+ MetricStat.MAX,
+ MetricStat.MIN
+ );
+ assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+ assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+ assertEquals(
+ StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP,
+ starTreeFieldType.getStarTreeConfig().getBuildMode()
+ );
+ assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
+ }
+ }
+ }
+ }
+
+ public void testUpdateIndexWithAdditionOfStarTree() {
+ prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get();
+
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(false, false)).get()
+ );
+ assertEquals("Index cannot have more than [1] star tree fields", ex.getMessage());
+ }
+
+ public void testUpdateIndexWithNewerStarTree() {
+ prepareCreate(TEST_INDEX).setMapping(createTestMappingWithoutStarTree(false, false, false)).get();
+
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(false, false)).get()
+ );
+ assertEquals(
+ "Composite fields must be specified during index creation, addition of new composite fields during update is not supported",
+ ex.getMessage()
+ );
+ }
+
+ public void testUpdateIndexWhenMappingIsDifferent() {
+ prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get();
+
+ // update some field in the mapping
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> client().admin().indices().preparePutMapping(TEST_INDEX).setSource(createUpdateTestMapping(true, true)).get()
+ );
+ assertTrue(ex.getMessage().contains("Cannot update parameter [config] from"));
+ }
+
+ public void testUpdateIndexWhenMappingIsSame() {
+ prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, false)).get();
+
+ // put the same mapping again; the update should be acknowledged as a no-op
+ AcknowledgedResponse putMappingResponse = client().admin()
+ .indices()
+ .preparePutMapping(TEST_INDEX)
+ .setSource(createMinimalTestMapping(false, false, false))
+ .get();
+ assertAcked(putMappingResponse);
+
+ Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
+ for (IndicesService service : dataNodeInstances) {
+ final Index index = resolveIndex("test");
+ if (service.hasIndex(index)) {
+ IndexService indexService = service.indexService(index);
+ Set<CompositeMappedFieldType> fts = indexService.mapperService().getCompositeFieldTypes();
+
+ for (CompositeMappedFieldType ft : fts) {
+ assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType);
+ StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft;
+ assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField());
+ assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension);
+ DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0);
+ List<Rounding.DateTimeUnit> expectedTimeUnits = Arrays.asList(
+ Rounding.DateTimeUnit.MINUTES_OF_HOUR,
+ Rounding.DateTimeUnit.HOUR_OF_DAY
+ );
+ assertEquals(expectedTimeUnits, dateDim.getIntervals());
+ assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField());
+ assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField());
+ List<MetricStat> expectedMetrics = Arrays.asList(
+ MetricStat.AVG,
+ MetricStat.COUNT,
+ MetricStat.SUM,
+ MetricStat.MAX,
+ MetricStat.MIN
+ );
+ assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics());
+ assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs());
+ assertEquals(
+ StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP,
+ starTreeFieldType.getStarTreeConfig().getBuildMode()
+ );
+ assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims());
+ }
+ }
+ }
+ }
+
+ public void testInvalidDimCompositeIndex() {
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(true, false, false)).get()
+ );
+ assertEquals(
+ "Aggregations not supported for the dimension field [numeric] with field type [integer] as part of star tree field",
+ ex.getMessage()
+ );
+ }
+
+ public void testMaxDimsCompositeIndex() {
+ MapperParsingException ex = expectThrows(
+ MapperParsingException.class,
+ () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping())
+ .setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DIMENSIONS_SETTING.getKey(), 2))
+ .get()
+ );
+ assertEquals(
+ "Failed to parse mapping [_doc]: ordered_dimensions cannot have more than 2 dimensions for star tree field [startree-1]",
+ ex.getMessage()
+ );
+ }
+
+ public void testMaxCalendarIntervalsCompositeIndex() {
+ MapperParsingException ex = expectThrows(
+ MapperParsingException.class,
+ () -> prepareCreate(TEST_INDEX).setMapping(createMaxDimTestMapping())
+ .setSettings(Settings.builder().put(StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING.getKey(), 1))
+ .get()
+ );
+ assertEquals(
+ "Failed to parse mapping [_doc]: At most [1] calendar intervals are allowed in dimension [timestamp]",
+ ex.getMessage()
+ );
+ }
+
+ public void testUnsupportedDim() {
+ MapperParsingException ex = expectThrows(
+ MapperParsingException.class,
+ () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, false, true)).get()
+ );
+ assertEquals(
+ "Failed to parse mapping [_doc]: unsupported field type associated with dimension [keyword] as part of star tree field [startree-1]",
+ ex.getMessage()
+ );
+ }
+
+ public void testInvalidMetric() {
+ IllegalArgumentException ex = expectThrows(
+ IllegalArgumentException.class,
+ () -> prepareCreate(TEST_INDEX).setMapping(createMinimalTestMapping(false, true, false)).get()
+ );
+ assertEquals(
+ "Aggregations not supported for the metrics field [numeric] with field type [integer] as part of star tree field",
+ ex.getMessage()
+ );
+ }
+
+ @After
+ public final void cleanupNodeSettings() {
+ assertAcked(
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setPersistentSettings(Settings.builder().putNull("*"))
+ .setTransientSettings(Settings.builder().putNull("*"))
+ );
+ }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java
index 0539f96e429c1..28bac3c7441b6 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java
@@ -13,6 +13,7 @@
import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.client.Client;
@@ -23,12 +24,14 @@
import org.opensearch.common.cache.stats.ImmutableCacheStats;
import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.FeatureFlags;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.common.xcontent.XContentHelper;
import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.IndexSettings;
import org.opensearch.index.cache.request.RequestCacheStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -266,10 +269,14 @@ private void startIndex(Client client, String indexName) throws InterruptedExcep
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello"));
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(indexName).setFlush(true).get();
ensureSearchable(indexName);
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheCleanupIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheCleanupIT.java
new file mode 100644
index 0000000000000..988ea99130b13
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheCleanupIT.java
@@ -0,0 +1,732 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.indices;
+
+import org.opensearch.action.admin.cluster.node.stats.NodeStats;
+import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
+import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
+import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
+import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.MergePolicyProvider;
+import org.opensearch.index.cache.request.RequestCacheStats;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.test.InternalSettingsPlugin;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.hamcrest.OpenSearchAssertions;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+
+import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING;
+import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false)
+public class IndicesRequestCacheCleanupIT extends OpenSearchIntegTestCase {
+
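+ // Upper bound, in cache-clean intervals, on how long the assertBusy calls below wait for the cache cleaner.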
+ private static final long MAX_ITERATIONS = 5;
+
+ @Override
+ protected Collection<Class<? extends Plugin>> nodePlugins() {
+ return Arrays.asList(InternalSettingsPlugin.class);
+ }
+
+ public void testCacheWithInvalidation() throws Exception {
+ Client client = client();
+ String index = "index";
+ setupIndex(client, index);
+ ensureSearchable(index);
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ forceMerge(client, index);
+ SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
+ assertSearchResponse(resp);
+ OpenSearchAssertions.assertAllSuccessful(resp);
+ assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
+
+ assertCacheState(client, index, 0, 1);
+ // Index but don't refresh
+ indexRandom(false, client.prepareIndex(index).setSource("k", "hello2"));
+ resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
+ assertSearchResponse(resp);
+ // Should expect a hit here as refresh didn't happen
+ assertCacheState(client, index, 1, 1);
+
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index));
+ // Explicit refresh would invalidate cache
+ refreshAndWaitForReplication();
+ // Hit same query again
+ resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
+ assertSearchResponse(resp);
+ // Should expect miss as key has changed due to change in IndexReader.CacheKey (due to refresh)
+ assertCacheState(client, index, 1, 2);
+ }
+
+ // calling the cache clear API, when the staleness threshold is lower than the staleness, should clean the stale keys from the cache
+ public void testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throws Exception {
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ // Set interval much larger than test timeout to effectively disable it
+ TimeValue.timeValueDays(1)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(index2);
+ client.admin().indices().clearCache(clearIndicesCacheRequest).actionGet();
+
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(1, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }
+
+ // when staleness threshold is lower than staleness, it should clean the stale keys from cache
+ public void testStaleKeysCleanupWithLowThreshold() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // create 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when staleness threshold is equal to staleness, it should clean the stale keys from cache
+ public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.33)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // create 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when staleness threshold is higher than staleness, it should NOT clean the cache
+ public void testCacheCleanupSkipsWithHighStalenessThreshold() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // force refresh so that it creates 1 stale key
+ flushAndRefresh(index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(1, getSegmentCount(client, index2));
+ // cache cleaner should NOT have cleaned up the stale key from index 2
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when staleness threshold is explicitly set to 0, cache cleaner regularly cleans up stale keys.
+ public void testCacheCleanupOnZeroStalenessThreshold() throws Exception {
+ int cacheCleanIntervalInMillis = 50;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create 10 index1 cache entries
+ for (int i = 1; i <= 10; i++) {
+ long cacheSizeBefore = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ createCacheEntry(client, index1, "hello" + i);
+ assertCacheState(client, index1, 0, i);
+ long cacheSizeAfter = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(cacheSizeAfter > cacheSizeBefore);
+ }
+
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // create 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when staleness threshold is not explicitly set, cache cleaner regularly cleans up stale keys
+ public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ String index1 = "index1";
+ String index2 = "index2";
+ Client client = client(node);
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // force refresh so that it creates 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fall-back setting
+ public void testCacheCleanupWithDefaultSettings() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder().put(INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY, TimeValue.timeValueMillis(cacheCleanIntervalInMillis))
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // create 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ // sleep until cache cleaner would have cleaned up the stale key from index 2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // staleness threshold updates flow through to the cache cleaner
+ public void testDynamicStalenessThresholdUpdate() throws Exception {
+ int cacheCleanIntervalInMillis = 1;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1 > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > memorySizeForIndex1);
+
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(finalMemorySizeForIndex1 > 0);
+
+ // create 1 stale key
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+ assertBusy(() -> {
+ // cache cleaner should NOT have cleaned up the stale key from index 2
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+
+ // Update indices.requests.cache.cleanup.staleness_threshold to "10%"
+ ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+ updateSettingsRequest.persistentSettings(Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), 0.10));
+ assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
+
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(1, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index 2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should NOT have cleaned from index 1
+ assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // staleness threshold dynamic updates should throw exceptions on invalid input
+ public void testInvalidStalenessThresholdUpdateThrowsException() throws Exception {
+ // Attempt to update indices.requests.cache.cleanup.staleness_threshold with an illegal value (10); valid ratios lie within [0, 1.0]
+ assertThrows("Ratio should be in [0-1.0]", IllegalArgumentException.class, () -> {
+ ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
+ updateSettingsRequest.persistentSettings(
+ Settings.builder().put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 10)
+ );
+ client().admin().cluster().updateSettings(updateSettingsRequest).actionGet();
+ });
+ }
+
+ // closing the index after caching should clean its entries from the indices request cache
+ public void testCacheClearanceAfterIndexClosure() throws Exception {
+ int cacheCleanIntervalInMillis = 100;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index = "index";
+ setupIndex(client, index);
+
+ // assert there are no entries in the cache for index
+ assertEquals(0, getRequestCacheStats(client, index).getMemorySizeInBytes());
+ // assert there are no entries in the cache from other indices in the node
+ assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
+ // create first cache entry in index
+ createCacheEntry(client, index, "hello");
+ assertCacheState(client, index, 0, 1);
+ assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0);
+ assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0);
+
+ // close index
+ assertAcked(client.admin().indices().prepareClose(index));
+ // request cache stats cannot be accessed since the index is closed
+ try {
+ getRequestCacheStats(client, index);
+ } catch (Exception e) {
+ assert (e instanceof IndexClosedException);
+ }
+ // sleep until cache cleaner would have cleaned up the stale key from index
+ assertBusy(() -> {
+ // cache cleaner should have cleaned up the stale keys from index
+ assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // deleting the index after caching should clean its entries from the indices request cache
+ public void testCacheCleanupAfterIndexDeletion() throws Exception {
+ int cacheCleanIntervalInMillis = 100;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index = "index";
+ setupIndex(client, index);
+
+ // assert there are no entries in the cache for index
+ assertEquals(0, getRequestCacheStats(client, index).getMemorySizeInBytes());
+ // assert there are no entries in the cache from other indices in the node
+ assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
+ // create first cache entry in index
+ createCacheEntry(client, index, "hello");
+ assertCacheState(client, index, 0, 1);
+ assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0);
+ assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0);
+
+ // delete index
+ assertAcked(client.admin().indices().prepareDelete(index));
+ // request cache stats cannot be accessed since the index has been deleted
+ try {
+ getRequestCacheStats(client, index);
+ } catch (Exception e) {
+ assert (e instanceof IndexNotFoundException);
+ }
+
+ // sleep until cache cleaner would have cleaned up the stale key from index
+ assertBusy(() -> {
+ // cache cleaner should have cleaned up the stale keys from index
+ assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ // when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys
+ public void testStaleKeysCleanupWithMultipleIndices() throws Exception {
+ int cacheCleanIntervalInMillis = 10;
+ String node = internalCluster().startNode(
+ Settings.builder()
+ .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
+ .put(
+ IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
+ TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
+ )
+ );
+ Client client = client(node);
+ String index1 = "index1";
+ String index2 = "index2";
+ setupIndex(client, index1);
+ setupIndex(client, index2);
+
+ // assert cache is empty for index1
+ assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes());
+ // create first cache entry in index1
+ createCacheEntry(client, index1, "hello");
+ assertCacheState(client, index1, 0, 1);
+ long memorySizeForIndex1With1Entries = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1With1Entries > 0);
+
+ // create second cache entry in index1
+ createCacheEntry(client, index1, "there");
+ assertCacheState(client, index1, 0, 2);
+ long memorySizeForIndex1With2Entries = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ assertTrue(memorySizeForIndex1With2Entries > memorySizeForIndex1With1Entries);
+
+ // assert cache is empty for index2
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // create first cache entry in index2
+ createCacheEntry(client, index2, "hello");
+ assertCacheState(client, index2, 0, 1);
+ assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
+
+ // invalidate the cache for index1
+ indexRandom(false, client.prepareIndex(index1).setId("1").setSource("d", "hello"));
+ forceMerge(client, index1);
+ // Assert cache is cleared up
+ assertBusy(
+ () -> { assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes()); },
+ cacheCleanIntervalInMillis * MAX_ITERATIONS,
+ TimeUnit.MILLISECONDS
+ );
+
+ // invalidate the cache for index2
+ indexRandom(false, client.prepareIndex(index2).setId("1").setSource("d", "hello"));
+ forceMerge(client, index2);
+
+ // create another cache entry in index1, matching memorySizeForIndex1With1Entries in size; this entry should not be cleaned up
+ createCacheEntry(client, index1, "hello");
+
+ // sleep until cache cleaner would have cleaned up the stale key from index2
+ assertBusy(() -> {
+ // assert segment counts stay the same
+ assertEquals(2, getSegmentCount(client, index1));
+ assertEquals(2, getSegmentCount(client, index2));
+ // cache cleaner should have cleaned up the stale key from index2 and hence cache should be empty
+ assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
+ // cache cleaner should have only cleaned up the stale entities for index1
+ long currentMemorySizeInBytesForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
+ // assert that the memory size of index1 only reflects the single entry created after the cache invalidation
+ assertEquals(memorySizeForIndex1With1Entries, currentMemorySizeInBytesForIndex1);
+ }, cacheCleanIntervalInMillis * MAX_ITERATIONS, TimeUnit.MILLISECONDS);
+ }
+
+ private void setupIndex(Client client, String index) throws Exception {
+ assertAcked(
+ client.admin()
+ .indices()
+ .prepareCreate(index)
+ .setMapping("k", "type=keyword")
+ .setSettings(
+ Settings.builder()
+ .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
+ // Disable background segment merges that would invalidate the cache
+ .put(MergePolicyProvider.INDEX_MERGE_ENABLED, false)
+ )
+ .get()
+ );
+ indexRandom(false, client.prepareIndex(index).setSource("k", "hello"));
+ indexRandom(false, client.prepareIndex(index).setSource("k", "there"));
+ ensureSearchable(index);
+ forceMerge(client, index);
+ }
+
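+ // Returns the segment count of the only shard of the given index, used to verify no unexpected merges ran during the test.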
+ private int getSegmentCount(Client client, String indexName) {
+ return client.admin()
+ .indices()
+ .segments(new IndicesSegmentsRequest(indexName))
+ .actionGet()
+ .getIndices()
+ .get(indexName)
+ .getShards()
+ .get(0)
+ .getShards()[0].getSegments()
+ .size();
+ }
+
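+ // Force merges the index (with flush) and waits for refresh/replication so later cached searches are not invalidated by background merges.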
+ private void forceMerge(Client client, String index) {
+ ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
+ OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+ refreshAndWaitForReplication();
+ }
+
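+ // Runs a term query with the request cache enabled so a cache entry is created for the given index.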
+ private void createCacheEntry(Client client, String index, String value) {
+ SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", value)).get();
+ assertSearchResponse(resp);
+ OpenSearchAssertions.assertAllSuccessful(resp);
+ }
+
+ private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) {
+ RequestCacheStats requestCacheStats = getRequestCacheStats(client, index);
+ // Check the hit count and miss count together so if they are not
+ // correct we can see both values
+ assertEquals(
+ Arrays.asList(expectedHits, expectedMisses, 0L),
+ Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount(), requestCacheStats.getEvictions())
+ );
+
+ }
+
+ private static RequestCacheStats getRequestCacheStats(Client client, String index) {
+ return client.admin().indices().prepareStats(index).setRequestCache(true).get().getTotal().getRequestCache();
+ }
+
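+ // Returns node-level request cache stats from the first data node found, or null if there is none.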
+ private static RequestCacheStats getNodeCacheStats(Client client) {
+ NodesStatsResponse stats = client.admin().cluster().prepareNodesStats().execute().actionGet();
+ for (NodeStats stat : stats.getNodes()) {
+ if (stat.getNode().isDataNode()) {
+ return stat.getIndices().getRequestCache();
+ }
+ }
+ return null;
+ }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
index 9888d2d8abd98..09d5c208a8756 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
@@ -37,7 +37,6 @@
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.node.stats.NodeStats;
import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
-import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.action.admin.indices.alias.Alias;
import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.opensearch.action.admin.indices.forcemerge.ForceMergeResponse;
@@ -55,7 +54,7 @@
import org.opensearch.core.index.Index;
import org.opensearch.core.index.shard.ShardId;
import org.opensearch.env.NodeEnvironment;
-import org.opensearch.index.IndexNotFoundException;
+import org.opensearch.index.IndexSettings;
import org.opensearch.index.cache.request.RequestCacheStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
@@ -75,13 +74,10 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
-import java.util.concurrent.TimeUnit;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
import static org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING;
-import static org.opensearch.indices.IndicesRequestCache.INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING;
-import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram;
import static org.opensearch.search.aggregations.AggregationBuilders.dateRange;
@@ -126,6 +122,8 @@ public void testCacheAggs() throws Exception {
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
@@ -135,6 +133,8 @@ public void testCacheAggs() throws Exception {
client.prepareIndex(index).setSource("f", "2014-05-13T00:00:00.000Z")
);
ensureSearchable(index);
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ forceMerge(client, index);
// This is not a random example: serialization with time zones writes shared strings
// which used to not work well with the query cache because of the handles stream output
@@ -197,6 +197,8 @@ public void testQueryRewrite() throws Exception {
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 5)
.put("index.number_of_routing_shards", 5)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
@@ -214,12 +216,8 @@ public void testQueryRewrite() throws Exception {
);
ensureSearchable(index);
assertCacheState(client, index, 0, 0);
-
// Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
- ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
- OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
- refreshAndWaitForReplication();
- ensureSearchable(index);
+ forceMerge(client, index);
assertCacheState(client, index, 0, 0);
@@ -268,6 +266,8 @@ public void testQueryRewriteMissingValues() throws Exception {
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
@@ -287,10 +287,7 @@ public void testQueryRewriteMissingValues() throws Exception {
assertCacheState(client, index, 0, 0);
// Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
- ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
- OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
- refreshAndWaitForReplication();
- ensureSearchable(index);
+ forceMerge(client, index);
assertCacheState(client, index, 0, 0);
@@ -335,6 +332,8 @@ public void testQueryRewriteDates() throws Exception {
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
@@ -354,10 +353,7 @@ public void testQueryRewriteDates() throws Exception {
assertCacheState(client, index, 0, 0);
// Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
- ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
- OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
- refreshAndWaitForReplication();
- ensureSearchable(index);
+ forceMerge(client, index);
assertCacheState(client, index, 0, 0);
@@ -399,6 +395,8 @@ public void testQueryRewriteDatesWithNow() throws Exception {
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
.build();
assertAcked(client.admin().indices().prepareCreate("index-1").setMapping("d", "type=date").setSettings(settings).get());
assertAcked(client.admin().indices().prepareCreate("index-2").setMapping("d", "type=date").setSettings(settings).get());
@@ -480,6 +478,7 @@ public void testCanCache() throws Exception {
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
.put("index.number_of_routing_shards", 2)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
.build();
String index = "index";
assertAcked(client.admin().indices().prepareCreate(index).setMapping("s", "type=date").setSettings(settings).get());
@@ -499,10 +498,7 @@ public void testCanCache() throws Exception {
assertCacheState(client, index, 0, 0);
// Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
- ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
- OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
- refreshAndWaitForReplication();
- ensureSearchable(index);
+ forceMerge(client, index);
assertCacheState(client, index, 0, 0);
@@ -631,7 +627,6 @@ public void testCacheWithFilteredAlias() throws InterruptedException {
assertCacheState(client, index, 2, 2);
}
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/11374")
public void testProfileDisableCache() throws Exception {
Client client = client();
String index = "index";
@@ -645,11 +640,15 @@ public void testProfileDisableCache() throws Exception {
.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
)
.get()
);
indexRandom(true, client.prepareIndex(index).setSource("k", "hello"));
ensureSearchable(index);
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ forceMerge(client, index);
int expectedHits = 0;
int expectedMisses = 0;
@@ -674,563 +673,6 @@ public void testProfileDisableCache() throws Exception {
}
}
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/12308")
- public void testCacheWithInvalidation() throws Exception {
- Client client = client();
- String index = "index";
- assertAcked(
- client.admin()
- .indices()
- .prepareCreate(index)
- .setMapping("k", "type=keyword")
- .setSettings(
- Settings.builder()
- .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
- .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
- .put("index.refresh_interval", -1)
- )
- .get()
- );
- indexRandom(true, client.prepareIndex(index).setSource("k", "hello"));
- ensureSearchable(index);
- SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
- assertSearchResponse(resp);
- OpenSearchAssertions.assertAllSuccessful(resp);
- assertThat(resp.getHits().getTotalHits().value, equalTo(1L));
-
- assertCacheState(client, index, 0, 1);
- // Index but don't refresh
- indexRandom(false, client.prepareIndex(index).setSource("k", "hello2"));
- resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
- assertSearchResponse(resp);
- // Should expect hit as here as refresh didn't happen
- assertCacheState(client, index, 1, 1);
-
- // Explicit refresh would invalidate cache
- refreshAndWaitForReplication();
- // Hit same query again
- resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
- assertSearchResponse(resp);
- // Should expect miss as key has changed due to change in IndexReader.CacheKey (due to refresh)
- assertCacheState(client, index, 1, 2);
- }
-
- // calling cache clear api, when staleness threshold is lower than staleness, it should clean the stale keys from cache
- public void testCacheClearAPIRemovesStaleKeysWhenStalenessThresholdIsLow() throws Exception {
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- // setting intentionally high to avoid cache cleaner interfering
- TimeValue.timeValueMillis(300)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- ClearIndicesCacheRequest clearIndicesCacheRequest = new ClearIndicesCacheRequest(index2);
- client.admin().indices().clearCache(clearIndicesCacheRequest).actionGet();
-
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }
-
- // when staleness threshold is lower than staleness, it should clean the stale keys from cache
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13540")
- public void testStaleKeysCleanupWithLowThreshold() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- }
-
- // when staleness threshold is equal to staleness, it should clean the stale keys from cache
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13503")
- public void testCacheCleanupOnEqualStalenessAndThreshold() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.33)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // when staleness threshold is higher than staleness, it should NOT clean the cache
- public void testCacheCleanupSkipsWithHighStalenessThreshold() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should NOT have cleaned up the stale key from index 2
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // when staleness threshold is explicitly set to 0, cache cleaner regularly cleans up stale keys.
- public void testCacheCleanupOnZeroStalenessThreshold() throws Exception {
- int cacheCleanIntervalInMillis = 50;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create 10 index1 cache entries
- for (int i = 1; i <= 10; i++) {
- long cacheSizeBefore = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- createCacheEntry(client, index1, "hello" + i);
- assertCacheState(client, index1, 0, i);
- long cacheSizeAfter = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(cacheSizeAfter > cacheSizeBefore);
- }
-
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // when staleness threshold is not explicitly set, cache cleaner regularly cleans up stale keys
- public void testStaleKeysRemovalWithoutExplicitThreshold() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- String index1 = "index1";
- String index2 = "index2";
- Client client = client(node);
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // when cache cleaner interval setting is not set, cache cleaner is configured appropriately with the fall-back setting
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13711")
- public void testCacheCleanupWithDefaultSettings() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder().put(INDICES_CACHE_CLEANUP_INTERVAL_SETTING_KEY, TimeValue.timeValueMillis(cacheCleanIntervalInMillis))
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- // sleep until cache cleaner would have cleaned up the stale key from index 2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // staleness threshold updates flows through to the cache cleaner
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13949")
- public void testDynamicStalenessThresholdUpdate() throws Exception {
- int cacheCleanIntervalInMillis = 1;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.90)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1 > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- assertTrue(getRequestCacheStats(client, index1).getMemorySizeInBytes() > memorySizeForIndex1);
-
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- long finalMemorySizeForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(finalMemorySizeForIndex1 > 0);
-
- // force refresh so that it creates 1 stale key
- flushAndRefresh(index2);
- assertBusy(() -> {
- // cache cleaner should NOT have cleaned up the stale key from index 2
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
-
- // Update indices.requests.cache.cleanup.staleness_threshold to "10%"
- ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
- updateSettingsRequest.persistentSettings(Settings.builder().put(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING.getKey(), 0.10));
- assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
-
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index 2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should NOT have cleaned from index 1
- assertEquals(finalMemorySizeForIndex1, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // staleness threshold dynamic updates should throw exceptions on invalid input
- public void testInvalidStalenessThresholdUpdateThrowsException() throws Exception {
- // Update indices.requests.cache.cleanup.staleness_threshold to "10%" with illegal argument
- assertThrows("Ratio should be in [0-1.0]", IllegalArgumentException.class, () -> {
- ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
- updateSettingsRequest.persistentSettings(
- Settings.builder().put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 10)
- );
- client().admin().cluster().updateSettings(updateSettingsRequest).actionGet();
- });
- }
-
- // closing the Index after caching will clean up from Indices Request Cache
- public void testCacheClearanceAfterIndexClosure() throws Exception {
- int cacheCleanIntervalInMillis = 100;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index = "index";
- setupIndex(client, index);
-
- // assert there are no entries in the cache for index
- assertEquals(0, getRequestCacheStats(client, index).getMemorySizeInBytes());
- // assert there are no entries in the cache from other indices in the node
- assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
- // create first cache entry in index
- createCacheEntry(client, index, "hello");
- assertCacheState(client, index, 0, 1);
- assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0);
- assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0);
-
- // close index
- assertAcked(client.admin().indices().prepareClose(index));
- // request cache stats cannot be access since Index should be closed
- try {
- getRequestCacheStats(client, index);
- } catch (Exception e) {
- assert (e instanceof IndexClosedException);
- }
- // sleep until cache cleaner would have cleaned up the stale key from index
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale keys from index
- assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // deleting the Index after caching will clean up from Indices Request Cache
- public void testCacheCleanupAfterIndexDeletion() throws Exception {
- int cacheCleanIntervalInMillis = 100;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index = "index";
- setupIndex(client, index);
-
- // assert there are no entries in the cache for index
- assertEquals(0, getRequestCacheStats(client, index).getMemorySizeInBytes());
- // assert there are no entries in the cache from other indices in the node
- assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
- // create first cache entry in index
- createCacheEntry(client, index, "hello");
- assertCacheState(client, index, 0, 1);
- assertTrue(getRequestCacheStats(client, index).getMemorySizeInBytes() > 0);
- assertTrue(getNodeCacheStats(client).getMemorySizeInBytes() > 0);
-
- // delete index
- assertAcked(client.admin().indices().prepareDelete(index));
- // request cache stats cannot be access since Index should be deleted
- try {
- getRequestCacheStats(client, index);
- } catch (Exception e) {
- assert (e instanceof IndexNotFoundException);
- }
-
- // sleep until cache cleaner would have cleaned up the stale key from index
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale keys from index
- assertEquals(0, getNodeCacheStats(client).getMemorySizeInBytes());
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- // when staleness threshold is lower than staleness, it should clean the cache from all indices having stale keys
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13437")
- public void testStaleKeysCleanupWithMultipleIndices() throws Exception {
- int cacheCleanIntervalInMillis = 10;
- String node = internalCluster().startNode(
- Settings.builder()
- .put(IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_STALENESS_THRESHOLD_SETTING_KEY, 0.10)
- .put(
- IndicesRequestCache.INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING_KEY,
- TimeValue.timeValueMillis(cacheCleanIntervalInMillis)
- )
- );
- Client client = client(node);
- String index1 = "index1";
- String index2 = "index2";
- setupIndex(client, index1);
- setupIndex(client, index2);
-
- // assert cache is empty for index1
- assertEquals(0, getRequestCacheStats(client, index1).getMemorySizeInBytes());
- // create first cache entry in index1
- createCacheEntry(client, index1, "hello");
- assertCacheState(client, index1, 0, 1);
- long memorySizeForIndex1With1Entries = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1With1Entries > 0);
-
- // create second cache entry in index1
- createCacheEntry(client, index1, "there");
- assertCacheState(client, index1, 0, 2);
- long memorySizeForIndex1With2Entries = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- assertTrue(memorySizeForIndex1With2Entries > memorySizeForIndex1With1Entries);
-
- // assert cache is empty for index2
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // create first cache entry in index2
- createCacheEntry(client, index2, "hello");
- assertCacheState(client, index2, 0, 1);
- assertTrue(getRequestCacheStats(client, index2).getMemorySizeInBytes() > 0);
-
- // force refresh both index1 and index2
- flushAndRefresh(index1, index2);
- // create another cache entry in index 1 same as memorySizeForIndex1With1Entries, this should not be cleaned up.
- createCacheEntry(client, index1, "hello");
- // sleep until cache cleaner would have cleaned up the stale key from index2
- assertBusy(() -> {
- // cache cleaner should have cleaned up the stale key from index2 and hence cache should be empty
- assertEquals(0, getRequestCacheStats(client, index2).getMemorySizeInBytes());
- // cache cleaner should have only cleaned up the stale entities for index1
- long currentMemorySizeInBytesForIndex1 = getRequestCacheStats(client, index1).getMemorySizeInBytes();
- // assert the memory size of index1 to only contain 1 entry added after flushAndRefresh
- assertEquals(memorySizeForIndex1With1Entries, currentMemorySizeInBytesForIndex1);
- // cache for index1 should not be empty since there was an item cached after flushAndRefresh
- assertTrue(currentMemorySizeInBytesForIndex1 > 0);
- }, cacheCleanIntervalInMillis * 2, TimeUnit.MILLISECONDS);
- }
-
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13600")
public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
String node_1 = internalCluster().startNode(Settings.builder().build());
Client client = client(node_1);
@@ -1246,7 +688,12 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
logger.info("Creating an index: {} with 2 shards", indexName);
createIndex(
indexName,
- Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
+ Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ // Disable index refreshing to avoid cache being invalidated mid-test
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(-1))
+ .build()
);
ensureGreen(indexName);
@@ -1254,6 +701,9 @@ public void testDeleteAndCreateSameIndexShardOnSameNode() throws Exception {
logger.info("Writing few docs and searching those which will cache items in RequestCache");
indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello"));
indexRandom(true, client.prepareIndex(indexName).setSource("y", "hello again"));
+ ensureSearchable(indexName);
+ // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache
+ forceMerge(client, indexName);
SearchResponse resp = client.prepareSearch(indexName).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
assertSearchResponse(resp);
resp = client.prepareSearch(indexName).setRequestCache(true).setQuery(QueryBuilders.termQuery("y", "hello")).get();
@@ -1326,29 +776,10 @@ private Path[] shardDirectory(String server, Index index, int shard) {
return paths;
}
- private void setupIndex(Client client, String index) throws Exception {
- assertAcked(
- client.admin()
- .indices()
- .prepareCreate(index)
- .setMapping("k", "type=keyword")
- .setSettings(
- Settings.builder()
- .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
- .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
- .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
- )
- .get()
- );
- indexRandom(true, client.prepareIndex(index).setSource("k", "hello"));
- indexRandom(true, client.prepareIndex(index).setSource("k", "there"));
- ensureSearchable(index);
- }
-
- private void createCacheEntry(Client client, String index, String value) {
- SearchResponse resp = client.prepareSearch(index).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", value)).get();
- assertSearchResponse(resp);
- OpenSearchAssertions.assertAllSuccessful(resp);
+ private void forceMerge(Client client, String index) {
+ ForceMergeResponse forceMergeResponse = client.admin().indices().prepareForceMerge(index).setFlush(true).get();
+ OpenSearchAssertions.assertAllSuccessful(forceMergeResponse);
+ refreshAndWaitForReplication();
}
private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index 70da3b0e38472..2421a1a507372 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -1405,7 +1405,7 @@ public void testPitCreatedOnReplica() throws Exception {
.setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1)))
.setRequestCache(false)
.get();
- PitTestsUtil.assertUsingGetAllPits(client(replica), pitResponse.getId(), pitResponse.getCreationTime());
+ PitTestsUtil.assertUsingGetAllPits(client(replica), pitResponse.getId(), pitResponse.getCreationTime(), TimeValue.timeValueDays(1));
assertSegments(false, INDEX_NAME, 1, client(replica), pitResponse.getId());
List currentFiles = List.of(replicaShard.store().directory().listAll());
diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
index 9481a6116cdbc..657d0f178e096 100644
--- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
@@ -60,15 +60,18 @@
import org.opensearch.plugins.Plugin;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
+import org.hamcrest.MatcherAssert;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
+import java.util.stream.Collectors;
import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.opensearch.test.NodeRoles.nonIngestNode;
+import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.notNullValue;
@@ -159,6 +162,14 @@ public void testSimulate() throws Exception {
}
public void testBulkWithIngestFailures() throws Exception {
+ runBulkTestWithRandomDocs(false);
+ }
+
+ public void testBulkWithIngestFailuresWithBatchSize() throws Exception {
+ runBulkTestWithRandomDocs(true);
+ }
+
+ private void runBulkTestWithRandomDocs(boolean shouldSetBatchSize) throws Exception {
createIndex("index");
BytesReference source = BytesReference.bytes(
@@ -177,6 +188,9 @@ public void testBulkWithIngestFailures() throws Exception {
int numRequests = scaledRandomIntBetween(32, 128);
BulkRequest bulkRequest = new BulkRequest();
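+        // optionally set a random batch size so the bulk request also exercises the batched ingest path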
+ if (shouldSetBatchSize) {
+ bulkRequest.batchSize(scaledRandomIntBetween(2, numRequests));
+ }
for (int i = 0; i < numRequests; i++) {
IndexRequest indexRequest = new IndexRequest("index").id(Integer.toString(i)).setPipeline("_id");
indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", i % 2 == 0);
@@ -200,6 +214,9 @@ public void testBulkWithIngestFailures() throws Exception {
);
assertThat(indexResponse, notNullValue());
assertThat(indexResponse.getId(), equalTo(Integer.toString(i)));
+ // verify field of successful doc
+ Map successDoc = client().prepareGet("index", indexResponse.getId()).get().getSourceAsMap();
+ assertThat(successDoc.get("processed"), equalTo(true));
assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
}
}
@@ -209,6 +226,58 @@ public void testBulkWithIngestFailures() throws Exception {
assertTrue(deletePipelineResponse.isAcknowledged());
}
+ public void testBulkWithIngestFailuresAndDropBatch() throws Exception {
+ createIndex("index");
+
+ BytesReference source = BytesReference.bytes(
+ jsonBuilder().startObject()
+ .field("description", "my_pipeline")
+ .startArray("processors")
+ .startObject()
+ .startObject("test")
+ .endObject()
+ .endObject()
+ .endArray()
+ .endObject()
+ );
+ PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id", source, MediaTypeRegistry.JSON);
+ client().admin().cluster().putPipeline(putPipelineRequest).get();
+
+ BulkRequest bulkRequest = new BulkRequest();
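+        // a batch size of 3 groups all three documents below into a single ingest batch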
+ bulkRequest.batchSize(3);
+ bulkRequest.add(
+ new IndexRequest("index").id("_fail").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", true)
+ );
+ bulkRequest.add(
+ new IndexRequest("index").id("_success").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "fail", false)
+ );
+ bulkRequest.add(
+ new IndexRequest("index").id("_drop").setPipeline("_id").source(Requests.INDEX_CONTENT_TYPE, "field", "value", "drop", true)
+ );
+
+ BulkResponse response = client().bulk(bulkRequest).actionGet();
+ MatcherAssert.assertThat(response.getItems().length, equalTo(bulkRequest.requests().size()));
+
+ Map results = Arrays.stream(response.getItems())
+ .collect(Collectors.toMap(BulkItemResponse::getId, r -> r));
+
+ MatcherAssert.assertThat(results.keySet(), containsInAnyOrder("_fail", "_success", "_drop"));
+ assertNotNull(results.get("_fail").getFailure());
+ assertNull(results.get("_success").getFailure());
+ assertNull(results.get("_drop").getFailure());
+
+ // verify dropped doc not in index
+ assertNull(client().prepareGet("index", "_drop").get().getSourceAsMap());
+
+ // verify field of successful doc
+ Map successDoc = client().prepareGet("index", "_success").get().getSourceAsMap();
+ assertThat(successDoc.get("processed"), equalTo(true));
+
+ // cleanup
+ AcknowledgedResponse deletePipelineResponse = client().admin().cluster().prepareDeletePipeline("_id").get();
+ assertTrue(deletePipelineResponse.isAcknowledged());
+ }
+
public void testBulkWithUpsert() throws Exception {
createIndex("index");
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
index 901b36f872622..5be9b25512704 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java
@@ -9,6 +9,8 @@
package org.opensearch.remotemigration;
import org.opensearch.action.DocWriteResponse;
+import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.opensearch.action.bulk.BulkRequest;
@@ -16,11 +18,15 @@
import org.opensearch.action.delete.DeleteResponse;
import org.opensearch.action.index.IndexRequest;
import org.opensearch.action.index.IndexResponse;
+import org.opensearch.client.Requests;
import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.health.ClusterHealthStatus;
import org.opensearch.cluster.metadata.RepositoryMetadata;
import org.opensearch.cluster.routing.RoutingNode;
+import org.opensearch.common.Priority;
import org.opensearch.common.UUIDs;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
import org.opensearch.repositories.fs.ReloadableFsRepository;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.junit.Before;
@@ -39,6 +45,7 @@
import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
public class MigrationBaseTestCase extends OpenSearchIntegTestCase {
protected static final String REPOSITORY_NAME = "test-remote-store-repo";
@@ -114,6 +121,10 @@ public void initDocRepToRemoteMigration() {
);
}
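+    // Overrides ensureGreen with an explicit 60-second timeout for the migration tests in this suite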
+ public ClusterHealthStatus ensureGreen(String... indices) {
+ return ensureGreen(TimeValue.timeValueSeconds(60), indices);
+ }
+
public BulkResponse indexBulk(String indexName, int numDocs) {
BulkRequest bulkRequest = new BulkRequest();
for (int i = 0; i < numDocs; i++) {
@@ -181,14 +192,12 @@ private Thread getIndexingThread() {
long currentDocCount = indexedDocs.incrementAndGet();
if (currentDocCount > 0 && currentDocCount % refreshFrequency == 0) {
if (rarely()) {
- logger.info("--> [iteration {}] flushing index", currentDocCount);
client().admin().indices().prepareFlush(indexName).get();
+ logger.info("Completed ingestion of {} docs. Flushing now", currentDocCount);
} else {
- logger.info("--> [iteration {}] refreshing index", currentDocCount);
client().admin().indices().prepareRefresh(indexName).get();
}
}
- logger.info("Completed ingestion of {} docs", currentDocCount);
}
});
}
@@ -218,4 +227,38 @@ public void stopShardRebalancing() {
.get()
);
}
+
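+    // Waits (up to 60 seconds) until no shards are relocating; logs the cluster state and fails if the wait times out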
+ public ClusterHealthStatus waitForRelocation() {
+ ClusterHealthRequest request = Requests.clusterHealthRequest()
+ .waitForNoRelocatingShards(true)
+ .timeout(TimeValue.timeValueSeconds(60))
+ .waitForEvents(Priority.LANGUID);
+ ClusterHealthResponse actionGet = client().admin().cluster().health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info(
+ "waitForRelocation timed out, cluster state:\n{}\n{}",
+ client().admin().cluster().prepareState().get().getState(),
+ client().admin().cluster().preparePendingClusterTasks().get()
+ );
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ return actionGet.getStatus();
+ }
+
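+    // Same as waitForRelocation() but with a caller-supplied timeout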
+ public ClusterHealthStatus waitForRelocation(TimeValue t) {
+ ClusterHealthRequest request = Requests.clusterHealthRequest()
+ .waitForNoRelocatingShards(true)
+ .timeout(t)
+ .waitForEvents(Priority.LANGUID);
+ ClusterHealthResponse actionGet = client().admin().cluster().health(request).actionGet();
+ if (actionGet.isTimedOut()) {
+ logger.info(
+ "waitForRelocation timed out, cluster state:\n{}\n{}",
+ client().admin().cluster().prepareState().get().getState(),
+ client().admin().cluster().preparePendingClusterTasks().get()
+ );
+ assertThat("timed out waiting for relocation", actionGet.isTimedOut(), equalTo(false));
+ }
+ return actionGet.getStatus();
+ }
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
index 5094a7cf29c6a..d046f41ce0590 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteDualReplicationIT.java
@@ -18,6 +18,7 @@
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexService;
+import org.opensearch.index.ReplicationStats;
import org.opensearch.index.remote.RemoteSegmentStats;
import org.opensearch.index.seqno.RetentionLease;
import org.opensearch.index.seqno.RetentionLeases;
@@ -665,6 +666,43 @@ public void testFailoverRemotePrimaryToDocrepReplicaReseedToRemotePrimary() thro
});
}
+ /*
+ Performs the same experiment as testRemotePrimaryDocRepReplica.
+
+ This ensures that the primary shard for the index has moved over to a remote-enabled
+ node, whereas the replica copy is still left behind on the docrep nodes.
+
+ At this stage, segrep lag computation shouldn't consider the docrep shard copy while calculating bytes lag.
+ */
+ public void testZeroSegrepLagForShardsWithMixedReplicationGroup() throws Exception {
+ testRemotePrimaryDocRepReplica();
+ String remoteNodeName = internalCluster().client()
+ .admin()
+ .cluster()
+ .prepareNodesStats()
+ .get()
+ .getNodes()
+ .stream()
+ .filter(nodeStats -> nodeStats.getNode().isRemoteStoreNode())
+ .findFirst()
+ .get()
+ .getNode()
+ .getName();
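+        // per the comment above, the docrep replica should not contribute to segrep lag, so all lag stats on the remote node must be zero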
+ ReplicationStats replicationStats = internalCluster().client()
+ .admin()
+ .cluster()
+ .prepareNodesStats(remoteNodeName)
+ .get()
+ .getNodes()
+ .get(0)
+ .getIndices()
+ .getSegments()
+ .getReplicationStats();
+ assertEquals(0, replicationStats.getMaxBytesBehind());
+ assertEquals(0, replicationStats.getTotalBytesBehind());
+ assertEquals(0, replicationStats.getMaxReplicationLag());
+ }
+
private void assertReplicaAndPrimaryConsistency(String indexName, int firstBatch, int secondBatch) throws Exception {
assertBusy(() -> {
Map shardStatsMap = internalCluster().client()
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
index 793adef0594fc..216c104dfecc1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java
@@ -8,6 +8,8 @@
package org.opensearch.remotemigration;
+import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction;
+import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest;
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.health.ClusterHealthStatus;
import org.opensearch.cluster.metadata.IndexMetadata;
@@ -273,7 +275,6 @@ initalMetadataVersion < internalCluster().client()
* After shard relocation completes, shuts down the docrep nodes and asserts remote
* index settings are applied even when the index is in YELLOW state
*/
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13737")
public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Exception {
internalCluster().startClusterManagerOnlyNode();
@@ -330,7 +331,6 @@ public void testIndexSettingsUpdatedEvenForMisconfiguredReplicas() throws Except
* After shard relocation completes, restarts the docrep node holding extra replica shard copy
* and asserts remote index settings are applied as soon as the docrep replica copy is unassigned
*/
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13871")
public void testIndexSettingsUpdatedWhenDocrepNodeIsRestarted() throws Exception {
internalCluster().startClusterManagerOnlyNode();
@@ -471,7 +471,6 @@ public void testRemotePathMetadataAddedWithFirstPrimaryMovingToRemote() throws E
* exclude docrep nodes, assert that remote index path file exists
* when shards start relocating to the remote nodes.
*/
- @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/13939")
public void testRemoteIndexPathFileExistsAfterMigration() throws Exception {
String docrepClusterManager = internalCluster().startClusterManagerOnlyNode();
@@ -518,7 +517,11 @@ public void testRemoteIndexPathFileExistsAfterMigration() throws Exception {
.isAcknowledged()
);
- internalCluster().stopRandomNode(InternalTestCluster.nameFilter(docrepClusterManager));
+ // exclude the docrep cluster manager from voting so that a node with remote cluster state enabled is elected cluster manager
+ internalCluster().client()
+ .execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(docrepClusterManager))
+ .get();
+
internalCluster().validateClusterFormed();
logger.info("---> Excluding docrep nodes from allocation");
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
index cea653c0ead4b..fa3b9368ded47 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemotePrimaryRelocationIT.java
@@ -99,16 +99,7 @@ public void testRemotePrimaryRelocation() throws Exception {
.add(new MoveAllocationCommand("test", 0, primaryNodeName("test"), remoteNode))
.execute()
.actionGet();
- ClusterHealthResponse clusterHealthResponse = client().admin()
- .cluster()
- .prepareHealth()
- .setTimeout(TimeValue.timeValueSeconds(60))
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForNoRelocatingShards(true)
- .execute()
- .actionGet();
-
- assertEquals(0, clusterHealthResponse.getRelocatingShards());
+ waitForRelocation();
assertEquals(remoteNode, primaryNodeName("test"));
logger.info("--> relocation from docrep to remote complete");
@@ -123,16 +114,7 @@ public void testRemotePrimaryRelocation() throws Exception {
.add(new MoveAllocationCommand("test", 0, remoteNode, remoteNode2))
.execute()
.actionGet();
- clusterHealthResponse = client().admin()
- .cluster()
- .prepareHealth()
- .setTimeout(TimeValue.timeValueSeconds(60))
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForNoRelocatingShards(true)
- .execute()
- .actionGet();
-
- assertEquals(0, clusterHealthResponse.getRelocatingShards());
+ waitForRelocation();
assertEquals(remoteNode2, primaryNodeName("test"));
logger.info("--> relocation from remote to remote complete");
@@ -155,7 +137,6 @@ public void testRemotePrimaryRelocation() throws Exception {
public void testMixedModeRelocation_RemoteSeedingFail() throws Exception {
String docRepNode = internalCluster().startNode();
- Client client = internalCluster().client(docRepNode);
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed"));
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
index aae726fe2a6bc..d6e25c0cab3ac 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteReplicaRecoveryIT.java
@@ -8,15 +8,12 @@
package org.opensearch.remotemigration;
-import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse;
import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand;
-import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
-import org.opensearch.common.unit.TimeValue;
import org.opensearch.index.SegmentReplicationPerGroupStats;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.test.OpenSearchIntegTestCase;
@@ -83,16 +80,8 @@ public void testReplicaRecovery() throws Exception {
.add(new MoveAllocationCommand("test", 0, primaryNode, remoteNode))
.execute()
.actionGet();
- ClusterHealthResponse clusterHealthResponse = client().admin()
- .cluster()
- .prepareHealth()
- .setTimeout(TimeValue.timeValueSeconds(60))
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForNoRelocatingShards(true)
- .execute()
- .actionGet();
- assertEquals(0, clusterHealthResponse.getRelocatingShards());
+ waitForRelocation();
logger.info("--> relocation of primary from docrep to remote complete");
logger.info("--> getting up the new replicas now to doc rep node as well as remote node ");
@@ -109,17 +98,7 @@ public void testReplicaRecovery() throws Exception {
)
.get();
- client().admin()
- .cluster()
- .prepareHealth()
- .setTimeout(TimeValue.timeValueSeconds(60))
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForGreenStatus()
- .execute()
- .actionGet();
- logger.info("--> replica is up now on another docrep now as well as remote node");
-
- assertEquals(0, clusterHealthResponse.getRelocatingShards());
+ waitForRelocation();
asyncIndexingService.stopIndexing();
refresh("test");
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
index 4e4f6da56d622..e0e25db4ca722 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java
@@ -8,13 +8,11 @@
package org.opensearch.remotemigration;
-import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.client.Client;
import org.opensearch.cluster.metadata.IndexMetadata;
-import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.FeatureFlags;
@@ -28,6 +26,7 @@
import java.util.List;
import java.util.Map;
+import static org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING;
import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING;
import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -48,6 +47,10 @@ protected Settings featureFlagSettings() {
return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE_MIGRATION_EXPERIMENTAL, "true").build();
}
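+    // Limit the maximum shard count for this test; the node concurrent recoveries setting below is sized to this value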
+ protected int maximumNumberOfShards() {
+ return 5;
+ }
+
public void testMixedModeAddRemoteNodes() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(0);
List cmNodes = internalCluster().startNodes(1);
@@ -155,7 +158,11 @@ public void testEndToEndRemoteMigration() throws Exception {
internalCluster().setBootstrapClusterManagerNodeIndex(0);
List docRepNodes = internalCluster().startNodes(2);
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
- updateSettingsRequest.persistentSettings(Settings.builder().put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed"));
+ updateSettingsRequest.persistentSettings(
+ Settings.builder()
+ .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed")
+ .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), maximumNumberOfShards())
+ );
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get();
ensureGreen("test");
@@ -189,16 +196,7 @@ public void testEndToEndRemoteMigration() throws Exception {
)
.get()
);
-
- ClusterHealthResponse clusterHealthResponse = client().admin()
- .cluster()
- .prepareHealth()
- .setTimeout(TimeValue.timeValueSeconds(45))
- .setWaitForEvents(Priority.LANGUID)
- .setWaitForNoRelocatingShards(true)
- .execute()
- .actionGet();
- assertTrue(clusterHealthResponse.getRelocatingShards() == 0);
+ waitForRelocation(TimeValue.timeValueSeconds(90));
logger.info("---> Stopping indexing thread");
asyncIndexingService.stopIndexing();
Map shardCountByNodeId = getShardCountByNodeId();
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
index b22817ef19d1b..11260e0914dc5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
@@ -57,6 +57,7 @@
import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK;
import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING;
import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING;
+import static org.opensearch.gateway.remote.RemoteClusterStateUtils.encodeString;
import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE;
import static org.opensearch.repositories.blobstore.BlobStoreRepository.SYSTEM_REPOSITORY_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -326,9 +327,7 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh
// Step - 3 Delete index metadata file in remote
try {
Files.move(
- segmentRepoPath.resolve(
- RemoteClusterStateService.encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"
- ),
+ segmentRepoPath.resolve(encodeString(clusterName) + "/cluster-state/" + prevClusterUUID + "/index"),
segmentRepoPath.resolve("cluster-state/")
);
} catch (IOException e) {
@@ -354,10 +353,7 @@ public void testRemoteStateFullRestart() throws Exception {
try {
Files.move(
segmentRepoPath.resolve(
- RemoteClusterStateService.encodeString(clusterService().state().getClusterName().value())
- + "/cluster-state/"
- + prevClusterUUID
- + "/manifest"
+ encodeString(clusterService().state().getClusterName().value()) + "/cluster-state/" + prevClusterUUID + "/manifest"
),
segmentRepoPath.resolve("cluster-state/")
);
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index 96d6338e5913b..194dce5f4a57a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -65,7 +65,6 @@
import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA;
import static org.opensearch.index.shard.IndexShardTestCase.getTranslog;
import static org.opensearch.indices.RemoteStoreSettings.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING;
-import static org.opensearch.test.OpenSearchTestCase.getShardLevelBlobPath;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.comparesEqualTo;
@@ -133,6 +132,21 @@ private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throw
);
}
+ public void testRemoteStoreIndexCreationAndDeletionWithReferencedStore() throws InterruptedException, ExecutionException {
+ String dataNode = internalCluster().startNodes(1).get(0);
+ createIndex(INDEX_NAME, remoteStoreIndexSettings(0));
+ ensureYellowAndNoInitializingShards(INDEX_NAME);
+ ensureGreen(INDEX_NAME);
+
+ IndexShard indexShard = getIndexShard(dataNode, INDEX_NAME);
+
+ // Simulate a condition where the store is already in use by increasing its ref count; this helps test index
+ // deletion while a refresh is in progress.
+ indexShard.store().incRef();
+ assertAcked(client().admin().indices().prepareDelete(INDEX_NAME));
+ indexShard.store().decRef();
+ }
+
public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogNoDataFlush() throws Exception {
testPeerRecovery(1, true);
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java
new file mode 100644
index 0000000000000..a51bd6b20fff0
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/WritableWarmIT.java
@@ -0,0 +1,160 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.remotestore;
+
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
+import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.opensearch.action.admin.indices.get.GetIndexRequest;
+import org.opensearch.action.admin.indices.get.GetIndexResponse;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsException;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.index.IndexModule;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.index.store.CompositeDirectory;
+import org.opensearch.index.store.remote.file.CleanerDaemonThreadLeakFilter;
+import org.opensearch.index.store.remote.filecache.FileCache;
+import org.opensearch.index.store.remote.utils.FileTypeUtils;
+import org.opensearch.indices.IndicesService;
+import org.opensearch.node.Node;
+import org.opensearch.test.InternalTestCluster;
+import org.opensearch.test.OpenSearchIntegTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
+
+@ThreadLeakFilters(filters = CleanerDaemonThreadLeakFilter.class)
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, supportsDedicatedMasters = false)
+// Uncomment the below line to enable trace level logs for this test for better debugging
+// @TestLogging(reason = "Getting trace logs from composite directory package", value = "org.opensearch.index.store:TRACE")
+public class WritableWarmIT extends RemoteStoreBaseIntegTestCase {
+
+ protected static final String INDEX_NAME = "test-idx-1";
+ protected static final int NUM_DOCS_IN_BULK = 1000;
+
+ /*
+ Disabling the MockFSIndexStore plugin as the MockFSDirectoryFactory wraps the FSDirectory in an OpenSearchMockDirectoryWrapper, which extends FilterDirectory (whereas FSDirectory extends BaseDirectory).
+ As a result of this wrapping, the local directory of the Composite Directory does not satisfy the assertion that the local directory must be of type FSDirectory.
+ */
+ @Override
+ protected boolean addMockIndexStorePlugin() {
+ return false;
+ }
+
+ @Override
+ protected Settings featureFlagSettings() {
+ Settings.Builder featureSettings = Settings.builder();
+ featureSettings.put(FeatureFlags.TIERED_REMOTE_INDEX, true);
+ return featureSettings.build();
+ }
+
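+    // with the feature flag disabled, the store locality setting is not registered, so index creation must fail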
+ public void testWritableWarmFeatureFlagDisabled() {
+ Settings clusterSettings = Settings.builder().put(super.nodeSettings(0)).put(FeatureFlags.TIERED_REMOTE_INDEX, false).build();
+ InternalTestCluster internalTestCluster = internalCluster();
+ internalTestCluster.startClusterManagerOnlyNode(clusterSettings);
+ internalTestCluster.startDataOnlyNode(clusterSettings);
+
+ Settings indexSettings = Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name())
+ .build();
+
+ try {
+ prepareCreate(INDEX_NAME).setSettings(indexSettings).get();
+ fail("Should have thrown Exception as setting should not be registered if Feature Flag is Disabled");
+ } catch (SettingsException ex) {
+ assertEquals(
+ "unknown setting ["
+ + IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey()
+ + "] please check that any required plugins are installed, or check the "
+ + "breaking changes documentation for removed settings",
+ ex.getMessage()
+ );
+ }
+ }
+
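+    // end-to-end check: a partial-locality index serves searches, and after a force merge the pre-merge files are gone from both the directory and the file cache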
+ public void testWritableWarmBasic() throws Exception {
+ InternalTestCluster internalTestCluster = internalCluster();
+ internalTestCluster.startClusterManagerOnlyNode();
+ internalTestCluster.startDataOnlyNode();
+ Settings settings = Settings.builder()
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ .put(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey(), IndexModule.DataLocalityType.PARTIAL.name())
+ .build();
+ assertAcked(client().admin().indices().prepareCreate(INDEX_NAME).setSettings(settings).get());
+
+ // Verify from the cluster settings if the data locality is partial
+ GetIndexResponse getIndexResponse = client().admin()
+ .indices()
+ .getIndex(new GetIndexRequest().indices(INDEX_NAME).includeDefaults(true))
+ .get();
+ Settings indexSettings = getIndexResponse.settings().get(INDEX_NAME);
+ assertEquals(IndexModule.DataLocalityType.PARTIAL.name(), indexSettings.get(IndexModule.INDEX_STORE_LOCALITY_SETTING.getKey()));
+
+ // Ingesting some docs
+ indexBulk(INDEX_NAME, NUM_DOCS_IN_BULK);
+ flushAndRefresh(INDEX_NAME);
+
+ // ensure the cluster is green before searching
+ ensureGreen();
+
+ SearchResponse searchResponse = client().prepareSearch(INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get();
+ // Assert that the search returns the same number of docs as were ingested
+ assertHitCount(searchResponse, NUM_DOCS_IN_BULK);
+
+ // Ingesting docs again before force merge
+ indexBulk(INDEX_NAME, NUM_DOCS_IN_BULK);
+ flushAndRefresh(INDEX_NAME);
+
+ FileCache fileCache = internalTestCluster.getDataNodeInstance(Node.class).fileCache();
+ IndexShard shard = internalTestCluster.getDataNodeInstance(IndicesService.class)
+ .indexService(resolveIndex(INDEX_NAME))
+ .getShardOrNull(0);
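+        // unwrap the two FilterDirectory layers around the shard's store to reach the underlying CompositeDirectory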
+ Directory directory = (((FilterDirectory) (((FilterDirectory) (shard.store().directory())).getDelegate())).getDelegate());
+
+ // Force merging the index
+ Set filesBeforeMerge = new HashSet<>(Arrays.asList(directory.listAll()));
+ client().admin().indices().prepareForceMerge(INDEX_NAME).setMaxNumSegments(1).get();
+ flushAndRefresh(INDEX_NAME);
+ Set filesAfterMerge = new HashSet<>(Arrays.asList(directory.listAll()));
+
+ Set filesFromPreviousGenStillPresent = filesBeforeMerge.stream()
+ .filter(filesAfterMerge::contains)
+ .filter(file -> !FileTypeUtils.isLockFile(file))
+ .filter(file -> !FileTypeUtils.isSegmentsFile(file))
+ .collect(Collectors.toUnmodifiableSet());
+
+ // Assert that after the merge, none of the files from the previous generation remain in the directory
+ assertTrue(filesFromPreviousGenStillPresent.isEmpty());
+
+ // Assert that files from the previous generation are also absent from the file cache
+ filesBeforeMerge.stream()
+ .filter(file -> !FileTypeUtils.isLockFile(file))
+ .filter(file -> !FileTypeUtils.isSegmentsFile(file))
+ .forEach(file -> assertNull(fileCache.get(((CompositeDirectory) directory).getFilePath(file))));
+
+ // Deleting the index (so that ref count drops to zero for all the files) and then pruning the cache to clear it to avoid any file
+ // leaks
+ assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get());
+ fileCache.prune();
+ }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java
index 333fba413ce4e..1abacbe5091dd 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/translogmetadata/mocks/MockFsMetadataSupportedRepository.java
@@ -16,9 +16,9 @@
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.env.Environment;
import org.opensearch.indices.recovery.RecoverySettings;
-import org.opensearch.repositories.fs.FsRepository;
+import org.opensearch.repositories.fs.ReloadableFsRepository;
-public class MockFsMetadataSupportedRepository extends FsRepository {
+public class MockFsMetadataSupportedRepository extends ReloadableFsRepository {
public static Setting TRIGGER_DATA_INTEGRITY_FAILURE = Setting.boolSetting(
"mock_fs_repository.trigger_data_integrity_failure",
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
index 4a8b00ea45738..4051bee3e4e5c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/AggregationsIntegrationIT.java
@@ -187,4 +187,27 @@ public void testAggsOnEmptyShards() {
// Validate non-global agg does not throw an exception
assertSearchResponse(client().prepareSearch("idx").addAggregation(stats("value_stats").field("score")).get());
}
+
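+    // with terminateAfter=1 the search should report early termination and return exactly one hit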
+ public void testAggsWithTerminateAfter() throws InterruptedException {
+ assertAcked(
+ prepareCreate(
+ "terminate_index",
+ Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+ ).setMapping("f", "type=keyword").get()
+ );
+ List docs = new ArrayList<>();
+ for (int i = 0; i < randomIntBetween(5, 20); ++i) {
+ docs.add(client().prepareIndex("terminate_index").setSource("f", Integer.toString(i / 3)));
+ }
+ indexRandom(true, docs);
+
+ SearchResponse response = client().prepareSearch("terminate_index")
+ .setSize(2)
+ .setTerminateAfter(1)
+ .addAggregation(terms("f").field("f"))
+ .get();
+ assertSearchResponse(response);
+ assertTrue(response.isTerminatedEarly());
+ assertEquals(response.getHits().getHits().length, 1);
+ }
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
index db4ee3571d141..b2ed689622e7d 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityIT.java
@@ -34,6 +34,7 @@
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.opensearch.action.index.IndexRequestBuilder;
import org.opensearch.action.search.SearchResponse;
import org.opensearch.common.settings.Settings;
@@ -59,6 +60,7 @@
import static java.util.Collections.emptyMap;
import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.opensearch.index.query.QueryBuilders.matchAllQuery;
+import static org.opensearch.search.SearchService.CARDINALITY_AGGREGATION_PRUNING_THRESHOLD;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
import static org.opensearch.search.aggregations.AggregationBuilders.cardinality;
import static org.opensearch.search.aggregations.AggregationBuilders.global;
@@ -255,6 +257,36 @@ public void testSingleValuedString() throws Exception {
assertCount(count, numDocs);
}
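+    // setting the pruning threshold to 0 disables dynamic pruning; the cardinality result should be unchanged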
+ public void testDisableDynamicPruning() throws Exception {
+ SearchResponse response = client().prepareSearch("idx")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .get();
+ assertSearchResponse(response);
+
+ Cardinality count1 = response.getAggregations().get("cardinality");
+
+ final ClusterUpdateSettingsResponse updateSettingResponse = client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey(), 0))
+ .get();
+ assertEquals(updateSettingResponse.getTransientSettings().get(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()), "0");
+
+ response = client().prepareSearch("idx")
+ .addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field("str_value"))
+ .get();
+ assertSearchResponse(response);
+ Cardinality count2 = response.getAggregations().get("cardinality");
+
+ assertEquals(count1, count2);
+
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().putNull(CARDINALITY_AGGREGATION_PRUNING_THRESHOLD.getKey()))
+ .get();
+ }
+
public void testSingleValuedNumeric() throws Exception {
SearchResponse response = client().prepareSearch("idx")
.addAggregation(cardinality("cardinality").precisionThreshold(precisionThreshold).field(singleNumericField()))
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java
index 8bea5ef97fbba..faec3977f94ef 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/pit/PitMultiNodeIT.java
@@ -104,7 +104,7 @@ public void testPit() throws Exception {
assertEquals(2, searchResponse.getSuccessfulShards());
assertEquals(2, searchResponse.getTotalShards());
validatePitStats("index", 2, 2);
- PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+ PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime(), TimeValue.timeValueDays(1));
assertSegments(false, client(), pitResponse.getId());
}
@@ -131,7 +131,12 @@ public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exce
public Settings onNodeStopped(String nodeName) throws Exception {
ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request);
CreatePitResponse pitResponse = execute.get();
- PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+ PitTestsUtil.assertUsingGetAllPits(
+ client(),
+ pitResponse.getId(),
+ pitResponse.getCreationTime(),
+ TimeValue.timeValueDays(1)
+ );
assertSegments(false, "index", 1, client(), pitResponse.getId());
assertEquals(1, pitResponse.getSuccessfulShards());
assertEquals(2, pitResponse.getTotalShards());
@@ -164,7 +169,12 @@ public Settings onNodeStopped(String nodeName) throws Exception {
assertEquals(0, searchResponse.getSkippedShards());
assertEquals(2, searchResponse.getTotalShards());
validatePitStats("index", 1, 1);
- PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime());
+ PitTestsUtil.assertUsingGetAllPits(
+ client(),
+ pitResponse.getId(),
+ pitResponse.getCreationTime(),
+ TimeValue.timeValueDays(1)
+ );
return super.onNodeStopped(nodeName);
}
});
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java
index c43a9c23661ea..8841638328ea4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java
@@ -45,7 +45,7 @@
import org.opensearch.index.query.QueryStringQueryBuilder;
import org.opensearch.search.SearchHit;
import org.opensearch.search.SearchHits;
-import org.opensearch.search.SearchModule;
+import org.opensearch.search.SearchService;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
import org.junit.Before;
import org.junit.BeforeClass;
@@ -101,7 +101,7 @@ public void setup() throws Exception {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
- .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT)
+ .put(SearchService.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT)
.build();
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
index a58db51780826..01ad06757640c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java
@@ -1914,14 +1914,8 @@ public void testRangeQueryWithTimeZone() throws Exception {
* Test range with a custom locale, e.g. "de" in this case. Documents here mention the day of week
* as "Mi" for "Mittwoch (Wednesday" and "Do" for "Donnerstag (Thursday)" and the month in the query
* as "Dez" for "Dezember (December)".
- * Note: this test currently needs the JVM arg `-Djava.locale.providers=SPI,COMPAT` to be set.
- * When running with gradle this is done implicitly through the BuildPlugin, but when running from
- * an IDE this might need to be set manually in the run configuration. See also CONTRIBUTING.md section
- * on "Configuring IDEs And Running Tests".
*/
public void testRangeQueryWithLocaleMapping() throws Exception {
- assert ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))) : "`-Djava.locale.providers=SPI,COMPAT` needs to be set";
-
assertAcked(
prepareCreate("test").setMapping(
jsonBuilder().startObject()
@@ -1938,17 +1932,21 @@ public void testRangeQueryWithLocaleMapping() throws Exception {
indexRandom(
true,
- client().prepareIndex("test").setId("1").setSource("date_field", "Mi, 06 Dez 2000 02:55:00 -0800"),
- client().prepareIndex("test").setId("2").setSource("date_field", "Do, 07 Dez 2000 02:55:00 -0800")
+ client().prepareIndex("test").setId("1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"),
+ client().prepareIndex("test").setId("2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800")
);
SearchResponse searchResponse = client().prepareSearch("test")
- .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Do, 07 Dez 2000 00:00:00 -0800"))
+ .setQuery(
+ QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Do., 07 Dez. 2000 00:00:00 -0800")
+ )
.get();
assertHitCount(searchResponse, 1L);
searchResponse = client().prepareSearch("test")
- .setQuery(QueryBuilders.rangeQuery("date_field").gte("Di, 05 Dez 2000 02:55:00 -0800").lte("Fr, 08 Dez 2000 00:00:00 -0800"))
+ .setQuery(
+ QueryBuilders.rangeQuery("date_field").gte("Di., 05 Dez. 2000 02:55:00 -0800").lte("Fr., 08 Dez. 2000 00:00:00 -0800")
+ )
.get();
assertHitCount(searchResponse, 2L);
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java
index cae543506f919..f9ccdbd62de1c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SimpleQueryStringIT.java
@@ -57,7 +57,7 @@
import org.opensearch.plugins.Plugin;
import org.opensearch.search.SearchHit;
import org.opensearch.search.SearchHits;
-import org.opensearch.search.SearchModule;
+import org.opensearch.search.SearchService;
import org.opensearch.search.builder.SearchSourceBuilder;
import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
import org.junit.BeforeClass;
@@ -79,6 +79,7 @@
import static org.opensearch.index.query.QueryBuilders.simpleQueryStringQuery;
import static org.opensearch.index.query.QueryBuilders.termQuery;
import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING;
+import static org.opensearch.search.SearchService.INDICES_MAX_CLAUSE_COUNT_SETTING;
import static org.opensearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFailures;
@@ -122,7 +123,7 @@ public static void createRandomClusterSetting() {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
- .put(SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT)
+ .put(SearchService.INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT)
.build();
}
@@ -720,6 +721,52 @@ public void testFieldAliasOnDisallowedFieldType() throws Exception {
assertHits(response.getHits(), "1");
}
+ public void testDynamicClauseCountUpdate() throws Exception {
+ client().prepareIndex("testdynamic").setId("1").setSource("field", "foo bar baz").get();
+ assertAcked(
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT - 1))
+ );
+ refresh();
+ StringBuilder sb = new StringBuilder("foo");
+
+ // build a query with more clauses than the lowered limit so the search fails
+ for (int i = 0; i <= CLUSTER_MAX_CLAUSE_COUNT; i++) {
+ sb.append(" OR foo" + i);
+ }
+
+ QueryStringQueryBuilder qb = queryStringQuery(sb.toString()).field("field");
+
+ SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> {
+ client().prepareSearch("testdynamic").setQuery(qb).get();
+ });
+
+ assertTrue(e.getDetailedMessage().contains("maxClauseCount is set to " + (CLUSTER_MAX_CLAUSE_COUNT - 1)));
+
+ // raise the limit to CLUSTER_MAX_CLAUSE_COUNT + 2 so the same query now succeeds
+ assertAcked(
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().put(INDICES_MAX_CLAUSE_COUNT_SETTING.getKey(), CLUSTER_MAX_CLAUSE_COUNT + 2))
+ );
+
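+ // brief pause, presumably to let the updated setting propagate, before retrying the query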
+ Thread.sleep(1);
+
+ SearchResponse response = client().prepareSearch("testdynamic").setQuery(qb).get();
+ assertHitCount(response, 1);
+ assertHits(response.getHits(), "1");
+
+ assertAcked(
+ client().admin()
+ .cluster()
+ .prepareUpdateSettings()
+ .setTransientSettings(Settings.builder().putNull(INDICES_MAX_CLAUSE_COUNT_SETTING.getKey()))
+ );
+ }
+
private void assertHits(SearchHits hits, String... ids) {
assertThat(hits.getTotalHits().value, equalTo((long) ids.length));
Set hitIds = new HashSet<>();
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
index b41dd99ff6d40..1c199df4d548e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
@@ -17,6 +17,7 @@
import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
+import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
@@ -27,6 +28,7 @@
import org.opensearch.cluster.block.ClusterBlockException;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.cluster.routing.GroupShardsIterator;
import org.opensearch.cluster.routing.ShardIterator;
import org.opensearch.cluster.routing.ShardRouting;
@@ -34,6 +36,7 @@
import org.opensearch.common.Priority;
import org.opensearch.common.io.PathUtils;
import org.opensearch.common.settings.Settings;
+import org.opensearch.common.settings.SettingsException;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.core.common.unit.ByteSizeUnit;
import org.opensearch.core.index.Index;
@@ -53,19 +56,24 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS;
+import static org.opensearch.common.util.FeatureFlags.TIERED_REMOTE_INDEX;
import static org.opensearch.core.common.util.CollectionUtils.iterableAsArrayList;
import static org.opensearch.index.store.remote.filecache.FileCacheSettings.DATA_TO_FILE_CACHE_SIZE_RATIO_SETTING;
import static org.opensearch.test.NodeRoles.clusterManagerOnlyNode;
import static org.opensearch.test.NodeRoles.dataNode;
+import static org.opensearch.test.NodeRoles.onlyRole;
+import static org.opensearch.test.NodeRoles.onlyRoles;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
@@ -132,21 +140,24 @@ public void testCreateSearchableSnapshot() throws Exception {
public void testSnapshottingSearchableSnapshots() throws Exception {
final String repoName = "test-repo";
+ final String initSnapName = "initial-snapshot";
final String indexName = "test-idx";
+ final String repeatSnapNamePrefix = "test-repeated-snap-";
+ final String repeatIndexNamePrefix = indexName + "-copy-";
final Client client = client();
// create an index, add data, snapshot it, then delete it
internalCluster().ensureAtLeastNumDataNodes(1);
createIndexWithDocsAndEnsureGreen(0, 100, indexName);
createRepositoryWithSettings(null, repoName);
- takeSnapshot(client, "initial-snapshot", repoName, indexName);
+ takeSnapshot(client, initSnapName, repoName, indexName);
deleteIndicesAndEnsureGreen(client, indexName);
// restore the index as a searchable snapshot
internalCluster().ensureAtLeastNumSearchNodes(1);
client.admin()
.cluster()
- .prepareRestoreSnapshot(repoName, "initial-snapshot")
+ .prepareRestoreSnapshot(repoName, initSnapName)
.setRenamePattern("(.+)")
.setRenameReplacement("$1-copy-0")
.setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT)
@@ -159,7 +170,7 @@ public void testSnapshottingSearchableSnapshots() throws Exception {
// Test that the searchable snapshot index can continue to be snapshotted and restored
for (int i = 0; i < 4; i++) {
- final String repeatedSnapshotName = "test-repeated-snap-" + i;
+ final String repeatedSnapshotName = repeatSnapNamePrefix + i;
takeSnapshot(client, repeatedSnapshotName, repoName);
deleteIndicesAndEnsureGreen(client, "_all");
client.admin()
@@ -181,21 +192,34 @@ public void testSnapshottingSearchableSnapshots() throws Exception {
final Map> snapshotInfoMap = response.getSnapshots()
.stream()
.collect(Collectors.toMap(s -> s.snapshotId().getName(), SnapshotInfo::indices));
- assertEquals(
- Map.of(
- "initial-snapshot",
- List.of("test-idx"),
- "test-repeated-snap-0",
- List.of("test-idx-copy-0"),
- "test-repeated-snap-1",
- List.of("test-idx-copy-1"),
- "test-repeated-snap-2",
- List.of("test-idx-copy-2"),
- "test-repeated-snap-3",
- List.of("test-idx-copy-3")
- ),
- snapshotInfoMap
- );
+ final Map> expect = new HashMap<>();
+ expect.put(initSnapName, List.of(indexName));
+ IntStream.range(0, 4).forEach(i -> expect.put(repeatSnapNamePrefix + i, List.of(repeatIndexNamePrefix + i)));
+ assertEquals(expect, snapshotInfoMap);
+
+ String[] snapNames = new String[5];
+ IntStream.range(0, 4).forEach(i -> snapNames[i] = repeatSnapNamePrefix + i);
+ snapNames[4] = initSnapName;
+ SnapshotsStatusResponse snapshotsStatusResponse = client.admin()
+ .cluster()
+ .prepareSnapshotStatus(repoName)
+ .addSnapshots(snapNames)
+ .execute()
+ .actionGet();
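+ // the initial snapshot should contain real files; snapshots of the searchable snapshot copies are expected to add no new files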
+ snapshotsStatusResponse.getSnapshots().forEach(s -> {
+ String snapName = s.getSnapshot().getSnapshotId().getName();
+ assertEquals(1, s.getIndices().size());
+ assertEquals(1, s.getShards().size());
+ if (snapName.equals("initial-snapshot")) {
+ assertNotNull(s.getIndices().get("test-idx"));
+ assertTrue(s.getShards().get(0).getStats().getTotalFileCount() > 0);
+ } else {
+ assertTrue(snapName.startsWith(repeatSnapNamePrefix));
+ assertEquals(1, s.getIndices().size());
+ assertNotNull(s.getIndices().get(repeatIndexNamePrefix + snapName.substring(repeatSnapNamePrefix.length())));
+ assertEquals(0L, s.getShards().get(0).getStats().getTotalFileCount());
+ }
+ });
}
/**
@@ -990,6 +1014,26 @@ public void cleanup() throws Exception {
);
}
+ public void testStartSearchNode() throws Exception {
+ // test start dedicated search node
+ internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.SEARCH_ROLE)));
+ // test start node without search role
+ internalCluster().startNode(Settings.builder().put(onlyRole(DiscoveryNodeRole.DATA_ROLE)));
+ // test start non-dedicated search node with TIERED_REMOTE_INDEX feature enabled
+ internalCluster().startNode(
+ Settings.builder()
+ .put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE)))
+ .put(TIERED_REMOTE_INDEX, true)
+ );
+ // test start non-dedicated search node
+ assertThrows(
+ SettingsException.class,
+ () -> internalCluster().startNode(
+ Settings.builder().put(onlyRoles(Set.of(DiscoveryNodeRole.SEARCH_ROLE, DiscoveryNodeRole.DATA_ROLE)))
+ )
+ );
+ }
+
private void assertSearchableSnapshotIndexDirectoryExistence(String nodeName, Index index, boolean exists) throws Exception {
final Node node = internalCluster().getInstance(Node.class, nodeName);
final ShardId shardId = new ShardId(index, 0);
diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
index 7e4ed186dd665..ac321d961679a 100644
--- a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
+++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java
@@ -53,6 +53,10 @@ public long getCreationTime() {
return creationTime;
}
+ public long getKeepAlive() {
+ return keepAlive;
+ }
+
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(pitId);
diff --git a/server/src/main/java/org/opensearch/cluster/ClusterModule.java b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
index c7fd263bda56a..bb51c42252448 100644
--- a/server/src/main/java/org/opensearch/cluster/ClusterModule.java
+++ b/server/src/main/java/org/opensearch/cluster/ClusterModule.java
@@ -48,6 +48,7 @@
import org.opensearch.cluster.metadata.MetadataIndexTemplateService;
import org.opensearch.cluster.metadata.MetadataMappingService;
import org.opensearch.cluster.metadata.MetadataUpdateSettingsService;
+import org.opensearch.cluster.metadata.QueryGroupMetadata;
import org.opensearch.cluster.metadata.RepositoriesMetadata;
import org.opensearch.cluster.metadata.ViewMetadata;
import org.opensearch.cluster.metadata.WeightedRoutingMetadata;
@@ -214,6 +215,8 @@ public static List getNamedWriteables() {
DecommissionAttributeMetadata::new,
DecommissionAttributeMetadata::readDiffFrom
);
+
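+ // register QueryGroup metadata as a custom cluster-state entry so it can be serialized and diffed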
+ registerMetadataCustom(entries, QueryGroupMetadata.TYPE, QueryGroupMetadata::new, QueryGroupMetadata::readDiffFrom);
// Task Status (not Diffable)
entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new));
return entries;
diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
index 72a3519aca6f8..4c76858107ed8 100644
--- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
+++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java
@@ -45,6 +45,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
/**
* Information passed during repository cleanup
@@ -118,6 +119,24 @@ public Version getMinimalSupportedVersion() {
return LegacyESVersion.fromId(7040099);
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ RepositoryCleanupInProgress that = (RepositoryCleanupInProgress) o;
+ return entries.equals(that.entries);
+ }
+
+ @Override
+ public int hashCode() {
+ return 31 + entries.hashCode();
+ }
+
/**
* Entry in the collection.
*
@@ -155,6 +174,23 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeLong(repositoryStateId);
}
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ RepositoryCleanupInProgress.Entry that = (RepositoryCleanupInProgress.Entry) o;
+ return repository.equals(that.repository) && repositoryStateId == that.repositoryStateId;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(repository, repositoryStateId);
+ }
+
@Override
public String toString() {
return "{" + repository + '}' + '{' + repositoryStateId + '}';
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
index 232f900f25375..2a54f6444ffda 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java
@@ -981,6 +981,10 @@ public static boolean isTemplatesMetadataEqual(Metadata metadata1, Metadata meta
return metadata1.templates.equals(metadata2.templates);
}
+ public static boolean isHashesOfConsistentSettingsEqual(Metadata metadata1, Metadata metadata2) {
+ return metadata1.hashesOfConsistentSettings.equals(metadata2.hashesOfConsistentSettings);
+ }
+
public static boolean isCustomMetadataEqual(Metadata metadata1, Metadata metadata2) {
int customCount1 = 0;
for (Map.Entry cursor : metadata1.customs.entrySet()) {
@@ -1283,6 +1287,7 @@ public Builder templates(Map templates) {
}
public Builder templates(TemplatesMetadata templatesMetadata) {
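+ // clear first so the supplied templates replace, rather than merge with, any existing ones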
+ this.templates.clear();
this.templates.putAll(templatesMetadata.getTemplates());
return this;
}
@@ -1363,6 +1368,25 @@ public Builder removeDataStream(String name) {
return this;
}
+ public Builder queryGroups(final Map queryGroups) {
+ this.customs.put(QueryGroupMetadata.TYPE, new QueryGroupMetadata(queryGroups));
+ return this;
+ }
+
+ public Builder put(final QueryGroup queryGroup) {
+ Objects.requireNonNull(queryGroup, "queryGroup should not be null");
+ Map existing = new HashMap<>(getQueryGroups());
+ existing.put(queryGroup.get_id(), queryGroup);
+ return queryGroups(existing);
+ }
+
+ private Map getQueryGroups() {
+ return Optional.ofNullable(this.customs.get(QueryGroupMetadata.TYPE))
+ .map(o -> (QueryGroupMetadata) o)
+ .map(QueryGroupMetadata::queryGroups)
+ .orElse(Collections.emptyMap());
+ }
+
private Map getViews() {
return Optional.ofNullable(customs.get(ViewMetadata.TYPE))
.map(o -> (ViewMetadata) o)
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
index 16edec112f123..7973745ce84b3 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java
@@ -85,6 +85,7 @@
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.index.IndexService;
import org.opensearch.index.IndexSettings;
+import org.opensearch.index.compositeindex.CompositeIndexValidator;
import org.opensearch.index.mapper.DocumentMapper;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.MapperService.MergeReason;
@@ -1318,6 +1319,10 @@ private static void updateIndexMappingsAndBuildSortOrder(
}
}
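+ // validate composite index constraints now that the merged mapping is available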
+ if (mapperService.isCompositeIndexPresent()) {
+ CompositeIndexValidator.validate(mapperService, indexService.getCompositeIndexSettings(), indexService.getIndexSettings());
+ }
+
if (sourceMetadata == null) {
// now that the mapping is merged we can validate the index sort.
// we cannot validate for index shrinking since the mapping is empty
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java
index 1406287149e8d..43894db86c512 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataMappingService.java
@@ -55,6 +55,7 @@
import org.opensearch.core.common.Strings;
import org.opensearch.core.index.Index;
import org.opensearch.index.IndexService;
+import org.opensearch.index.compositeindex.CompositeIndexValidator;
import org.opensearch.index.mapper.DocumentMapper;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.MapperService.MergeReason;
@@ -282,6 +283,7 @@ private ClusterState applyRequest(
// first, simulate: just call merge and ignore the result
existingMapper.merge(newMapper.mapping(), MergeReason.MAPPING_UPDATE);
}
+
}
Metadata.Builder builder = Metadata.builder(metadata);
boolean updated = false;
@@ -291,7 +293,7 @@ private ClusterState applyRequest(
// we use the exact same indexService and metadata we used to validate above here to actually apply the update
final Index index = indexMetadata.getIndex();
final MapperService mapperService = indexMapperServices.get(index);
-
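+ // record whether a composite field already exists before the mapping update is merged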
+ boolean isCompositeFieldPresent = !mapperService.getCompositeFieldTypes().isEmpty();
CompressedXContent existingSource = null;
DocumentMapper existingMapper = mapperService.documentMapper();
if (existingMapper != null) {
@@ -302,6 +304,14 @@ private ClusterState applyRequest(
mappingUpdateSource,
MergeReason.MAPPING_UPDATE
);
+
+ CompositeIndexValidator.validate(
+ mapperService,
+ indicesService.getCompositeIndexSettings(),
+ mapperService.getIndexSettings(),
+ isCompositeFieldPresent
+ );
+
CompressedXContent updatedSource = mergedMapper.mappingSource();
if (existingSource != null) {
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java
new file mode 100644
index 0000000000000..beaab198073df
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroup.java
@@ -0,0 +1,317 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.cluster.AbstractDiffable;
+import org.opensearch.cluster.Diff;
+import org.opensearch.common.UUIDs;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.ToXContentObject;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.core.xcontent.XContentParser;
+import org.opensearch.search.ResourceType;
+import org.joda.time.Instant;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Class to define the QueryGroup schema
+ * {
+ * "_id": "fafjafjkaf9ag8a9ga9g7ag0aagaga",
+ * "resourceLimits": {
+ * "jvm": 0.4
+ * },
+ * "resiliency_mode": "enforced",
+ * "name": "analytics",
+ * "updatedAt": 4513232415
+ * }
+ */
+@ExperimentalApi
+public class QueryGroup extends AbstractDiffable implements ToXContentObject {
+
+ private static final int MAX_CHARS_ALLOWED_IN_NAME = 50;
+ private final String name;
+ private final String _id;
+ private final ResiliencyMode resiliencyMode;
+ // It is an epoch in millis
+ private final long updatedAtInMillis;
+ private final Map resourceLimits;
+
+ public QueryGroup(String name, ResiliencyMode resiliencyMode, Map resourceLimits) {
+ this(name, UUIDs.randomBase64UUID(), resiliencyMode, resourceLimits, Instant.now().getMillis());
+ }
+
+ public QueryGroup(String name, String _id, ResiliencyMode resiliencyMode, Map resourceLimits, long updatedAt) {
+ Objects.requireNonNull(name, "QueryGroup.name can't be null");
+ Objects.requireNonNull(resourceLimits, "QueryGroup.resourceLimits can't be null");
+ Objects.requireNonNull(resiliencyMode, "QueryGroup.resiliencyMode can't be null");
+ Objects.requireNonNull(_id, "QueryGroup._id can't be null");
+
+ if (name.length() > MAX_CHARS_ALLOWED_IN_NAME) {
+ throw new IllegalArgumentException("QueryGroup.name shouldn't be more than 50 chars long");
+ }
+
+ if (resourceLimits.isEmpty()) {
+ throw new IllegalArgumentException("QueryGroup.resourceLimits should at least have 1 resource limit");
+ }
+ validateResourceLimits(resourceLimits);
+ if (!isValid(updatedAt)) {
+ throw new IllegalArgumentException("QueryGroup.updatedAtInMillis is not a valid epoch");
+ }
+
+ this.name = name;
+ this._id = _id;
+ this.resiliencyMode = resiliencyMode;
+ this.resourceLimits = resourceLimits;
+ this.updatedAtInMillis = updatedAt;
+ }
+
+ private static boolean isValid(long updatedAt) {
+ long minValidTimestamp = Instant.ofEpochMilli(0L).getMillis();
+
+ // Use Instant.now() to get the current time in milliseconds since epoch
+ long currentSeconds = Instant.now().getMillis();
+
+ // Check if the timestamp is within a reasonable range
+ return minValidTimestamp <= updatedAt && updatedAt <= currentSeconds;
+ }
+
+ public QueryGroup(StreamInput in) throws IOException {
+ this(
+ in.readString(),
+ in.readString(),
+ ResiliencyMode.fromName(in.readString()),
+ in.readMap((i) -> ResourceType.fromName(i.readString()), StreamInput::readGenericValue),
+ in.readLong()
+ );
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(_id);
+ out.writeString(resiliencyMode.getName());
+ out.writeMap(resourceLimits, ResourceType::writeTo, StreamOutput::writeGenericValue);
+ out.writeLong(updatedAtInMillis);
+ }
+
+ private void validateResourceLimits(Map resourceLimits) {
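+ // each threshold must be non-null and must not exceed 1.0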
+ for (Map.Entry resource : resourceLimits.entrySet()) {
+ Double threshold = (Double) resource.getValue();
+ Objects.requireNonNull(resource.getKey(), "resourceName can't be null");
+ Objects.requireNonNull(threshold, "resource limit threshold for " + resource.getKey().getName() + " cannot be null");
+
+ if (Double.compare(threshold, 1.0) > 0) {
+ throw new IllegalArgumentException("resource value should be less than 1.0");
+ }
+ }
+ }
+
+ @Override
+ public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+ builder.startObject();
+ builder.field("_id", _id);
+ builder.field("name", name);
+ builder.field("resiliency_mode", resiliencyMode.getName());
+ builder.field("updatedAt", updatedAtInMillis);
+ // write resource limits
+ builder.startObject("resourceLimits");
+ for (ResourceType resourceType : ResourceType.values()) {
+ if (resourceLimits.containsKey(resourceType)) {
+ builder.field(resourceType.getName(), resourceLimits.get(resourceType));
+ }
+ }
+ builder.endObject();
+
+ builder.endObject();
+ return builder;
+ }
+
+ public static QueryGroup fromXContent(final XContentParser parser) throws IOException {
+ if (parser.currentToken() == null) { // fresh parser? move to the first token
+ parser.nextToken();
+ }
+
+ Builder builder = builder();
+
+ XContentParser.Token token = parser.currentToken();
+
+ if (token != XContentParser.Token.START_OBJECT) {
+ throw new IllegalArgumentException("Expected START_OBJECT token but found [" + parser.currentName() + "]");
+ }
+
+ String fieldName = "";
+ // Map to hold resources
+ final Map resourceLimits = new HashMap<>();
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else if (token.isValue()) {
+ if (fieldName.equals("_id")) {
+ builder._id(parser.text());
+ } else if (fieldName.equals("name")) {
+ builder.name(parser.text());
+ } else if (fieldName.equals("resiliency_mode")) {
+ builder.mode(parser.text());
+ } else if (fieldName.equals("updatedAt")) {
+ builder.updatedAt(parser.longValue());
+ } else {
+ throw new IllegalArgumentException(fieldName + " is not a valid field in QueryGroup");
+ }
+ } else if (token == XContentParser.Token.START_OBJECT) {
+
+ if (!fieldName.equals("resourceLimits")) {
+ throw new IllegalArgumentException(
+ "QueryGroup.resourceLimits is an object and expected token was { " + " but found " + token
+ );
+ }
+
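+ // parse the nested resourceLimits object into resource-type -> threshold entries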
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else {
+ resourceLimits.put(ResourceType.fromName(fieldName), parser.doubleValue());
+ }
+ }
+
+ }
+ }
+ builder.resourceLimits(resourceLimits);
+ return builder.build();
+ }
+
+ public static Diff readDiff(final StreamInput in) throws IOException {
+ return readDiffFrom(QueryGroup::new, in);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ QueryGroup that = (QueryGroup) o;
+ return Objects.equals(name, that.name)
+ && Objects.equals(resourceLimits, that.resourceLimits)
+ && Objects.equals(_id, that._id)
+ && updatedAtInMillis == that.updatedAtInMillis;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, resourceLimits, updatedAtInMillis, _id);
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public ResiliencyMode getResiliencyMode() {
+ return resiliencyMode;
+ }
+
+ public Map getResourceLimits() {
+ return resourceLimits;
+ }
+
+ public String get_id() {
+ return _id;
+ }
+
+ public long getUpdatedAtInMillis() {
+ return updatedAtInMillis;
+ }
+
+ /**
+ * builder method for the {@link QueryGroup}
+ * @return Builder object
+ */
+ public static Builder builder() {
+ return new Builder();
+ }
+
+ /**
+ * This enum models the different QueryGroup resiliency modes
+ * SOFT - the query group may consume more than its assigned resource limits while the node is not in duress
+ * ENFORCED - tasks in the query group are cancelled as soon as the assigned limits are breached
+ * MONITOR - no tasks are cancelled; eligible task cancellations are only logged
+ */
+ @ExperimentalApi
+ public enum ResiliencyMode {
+ SOFT("soft"),
+ ENFORCED("enforced"),
+ MONITOR("monitor");
+
+ private final String name;
+
+ ResiliencyMode(String mode) {
+ this.name = mode;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public static ResiliencyMode fromName(String s) {
+ for (ResiliencyMode mode : values()) {
+ if (mode.getName().equalsIgnoreCase(s)) return mode;
+
+ }
+ throw new IllegalArgumentException("Invalid value for QueryGroupMode: " + s);
+ }
+
+ }
+
+ /**
+ * Builder class for {@link QueryGroup}
+ */
+ @ExperimentalApi
+ public static class Builder {
+ private String name;
+ private String _id;
+ private ResiliencyMode resiliencyMode;
+ private long updatedAt;
+ private Map resourceLimits;
+
+ private Builder() {}
+
+ public Builder name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public Builder _id(String _id) {
+ this._id = _id;
+ return this;
+ }
+
+ public Builder mode(String mode) {
+ this.resiliencyMode = ResiliencyMode.fromName(mode);
+ return this;
+ }
+
+ public Builder updatedAt(long updatedAt) {
+ this.updatedAt = updatedAt;
+ return this;
+ }
+
+ public Builder resourceLimits(Map resourceLimits) {
+ this.resourceLimits = resourceLimits;
+ return this;
+ }
+
+ public QueryGroup build() {
+ return new QueryGroup(name, _id, resiliencyMode, resourceLimits, updatedAt);
+ }
+
+ }
+}
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java
new file mode 100644
index 0000000000000..79732bc505ee2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/metadata/QueryGroupMetadata.java
@@ -0,0 +1,185 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.Diff;
+import org.opensearch.cluster.DiffableUtils;
+import org.opensearch.cluster.NamedDiff;
+import org.opensearch.core.ParseField;
+import org.opensearch.core.common.Strings;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.core.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.opensearch.cluster.metadata.Metadata.ALL_CONTEXTS;
+
+/**
+ * This class holds the QueryGroup metadata for the cluster state.
+ * Sample schema:
+ * {
+ * "queryGroups": {
+ * "_id": {
+ * {@link QueryGroup}
+ * },
+ * ...
+ * }
+ * }
+ */
+public class QueryGroupMetadata implements Metadata.Custom {
+ public static final String TYPE = "queryGroups";
+ private static final ParseField QUERY_GROUP_FIELD = new ParseField("queryGroups");
+
+ private final Map queryGroups;
+
+ public QueryGroupMetadata(Map queryGroups) {
+ this.queryGroups = queryGroups;
+ }
+
+ public QueryGroupMetadata(StreamInput in) throws IOException {
+ this.queryGroups = in.readMap(StreamInput::readString, QueryGroup::new);
+ }
+
+ public Map queryGroups() {
+ return this.queryGroups;
+ }
+
+ /**
+ * Returns the name of the writeable object
+ */
+ @Override
+ public String getWriteableName() {
+ return TYPE;
+ }
+
+ /**
+ * The minimal version of the recipient this object can be sent to
+ */
+ @Override
+ public Version getMinimalSupportedVersion() {
+ return Version.V_3_0_0;
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeMap(queryGroups, StreamOutput::writeString, (stream, val) -> val.writeTo(stream));
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
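+ // write each QueryGroup as a field keyed by its map key (the group _id)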
+ for (Map.Entry entry : queryGroups.entrySet()) {
+ builder.field(entry.getKey(), entry.getValue());
+ }
+ return builder;
+ }
+
+ public static QueryGroupMetadata fromXContent(XContentParser parser) throws IOException {
+ Map queryGroupMap = new HashMap<>();
+
+ if (parser.currentToken() == null) {
+ parser.nextToken();
+ }
+
+ if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+ throw new IllegalArgumentException(
+ "QueryGroupMetadata.fromXContent was expecting a { token but found : " + parser.currentToken()
+ );
+ }
+ XContentParser.Token token = parser.currentToken();
+ String fieldName = parser.currentName();
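+ // each top-level field name is treated as a QueryGroup _id; its object value is parsed into a QueryGroup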
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
+ fieldName = parser.currentName();
+ } else {
+ QueryGroup queryGroup = QueryGroup.fromXContent(parser);
+ queryGroupMap.put(fieldName, queryGroup);
+ }
+ }
+
+ return new QueryGroupMetadata(queryGroupMap);
+ }
+
+ @Override
+ public Diff diff(final Metadata.Custom previousState) {
+ return new QueryGroupMetadataDiff((QueryGroupMetadata) previousState, this);
+ }
+
+ public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
+ return new QueryGroupMetadataDiff(in);
+ }
+
+ @Override
+ public EnumSet context() {
+ return ALL_CONTEXTS;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ QueryGroupMetadata that = (QueryGroupMetadata) o;
+ return Objects.equals(queryGroups, that.queryGroups);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(queryGroups);
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(MediaTypeRegistry.JSON, this);
+ }
+
+ /**
+ * QueryGroupMetadataDiff
+ */
+ static class QueryGroupMetadataDiff implements NamedDiff {
+ final Diff